common/mlx5: prepare support of packet pacing
[dpdk.git] / drivers / common / mlx5 / mlx5_devx_cmds.c
1 // SPDX-License-Identifier: BSD-3-Clause
2 /* Copyright 2018 Mellanox Technologies, Ltd */
3
4 #include <unistd.h>
5
6 #include <rte_errno.h>
7 #include <rte_malloc.h>
8
9 #include "mlx5_prm.h"
10 #include "mlx5_devx_cmds.h"
11 #include "mlx5_common_utils.h"
12
13
14 /**
15  * Allocate flow counters via devx interface.
16  *
17  * @param[in] ctx
18  *   Context returned from mlx5 open_device() glue function.
19  * @param dcs
20  *   Pointer to counters properties structure to be filled by the routine.
21  * @param bulk_n_128
22  *   Bulk counter numbers in 128 counters units.
23  *
24  * @return
25  *   Pointer to counter object on success, a negative value otherwise and
26  *   rte_errno is set.
27  */
28 struct mlx5_devx_obj *
29 mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
30 {
31         struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
32         uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
33         uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
34
35         if (!dcs) {
36                 rte_errno = ENOMEM;
37                 return NULL;
38         }
39         MLX5_SET(alloc_flow_counter_in, in, opcode,
40                  MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
41         MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
42         dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
43                                               sizeof(in), out, sizeof(out));
44         if (!dcs->obj) {
45                 DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
46                 rte_errno = errno;
47                 rte_free(dcs);
48                 return NULL;
49         }
50         dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
51         return dcs;
52 }
53
/**
 * Query flow counters values.
 *
 * Supports two modes selected by the arguments:
 *  - single counter, synchronous or asynchronous: n_counters == 0, the
 *    packets/bytes values are parsed from the response (sync only);
 *  - batch query: n_counters > 0, the device dumps the counters directly
 *    into the memory described by mkey/addr and the response carries no
 *    inline statistics.
 *
 * @param[in] dcs
 *   devx object that was obtained from mlx5_devx_cmd_fc_alloc.
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 in case of 1 counter to read, otherwise the counter number to read.
 * @param pkts
 *   The number of packets that matched the flow.
 * @param bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey key for batch query.
 * @param addr
 *   The address in the mkey range for batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 void *cmd_comp,
				 uint64_t async_id)
{
	/* Response size in bytes: header plus one inline traffic_counter. */
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	/*
	 * NOTE(review): out_len is a byte count but declares a uint32_t VLA,
	 * so the buffer is 4x larger than needed. Harmless (the byte length
	 * passed to the query below is correct) but worth confirming intent.
	 */
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		/* Batch mode: dump counters to the caller-provided mkey. */
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		/* Asynchronous query, completion reported via cmd_comp. */
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		/* Single counter: parse the inline statistics. */
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}
128
/**
 * Create a new mkey.
 *
 * Two layouts are supported:
 *  - indirect (KLM) mkey when attr->klm_num > 0: the KLM entries are
 *    copied into the command, padded to a multiple of 4;
 *  - direct (MTT) mkey otherwise, covering attr->size bytes of the umem.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to Devx mkey on success, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(void *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	/* Command size in dwords: base struct + padded KLM array (if any). */
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	/* VLA cannot take "= {0}"; clear it manually (4 bytes per dword). */
	memset(in, 0, in_size_dw * 4);
	pgsize = sysconf(_SC_PAGESIZE);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);
		/* Translation entries must be padded to a multiple of 4. */
		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		/* Zero-fill the padding entries. */
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		/* Fixed-buffer-size KLM when an entity size is given. */
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		/* One 8-byte MTT entry per page, expressed in 16B octwords. */
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	/* Local read/write access, no QP binding (reserved qpn value). */
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	/* Low 8 bits of the key are taken from the umem id (variant part). */
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	if (attr->relaxed_ordering == 1) {
		MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
		MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
	}
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		rte_free(mkey);
		return NULL;
	}
	/* Full mkey = returned index (high 24b) | umem-based variant byte. */
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}
218
219 /**
220  * Get status of devx command response.
221  * Mainly used for asynchronous commands.
222  *
223  * @param[in] out
224  *   The out response buffer.
225  *
226  * @return
227  *   0 on success, non-zero value otherwise.
228  */
229 int
230 mlx5_devx_get_out_command_status(void *out)
231 {
232         int status;
233
234         if (!out)
235                 return -EINVAL;
236         status = MLX5_GET(query_flow_counter_out, out, status);
237         if (status) {
238                 int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
239
240                 DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
241                         syndrome);
242         }
243         return status;
244 }
245
246 /**
247  * Destroy any object allocated by a Devx API.
248  *
249  * @param[in] obj
250  *   Pointer to a general object.
251  *
252  * @return
253  *   0 on success, a negative value otherwise.
254  */
255 int
256 mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
257 {
258         int ret;
259
260         if (!obj)
261                 return 0;
262         ret =  mlx5_glue->devx_obj_destroy(obj->obj);
263         rte_free(obj);
264         return ret;
265 }
266
267 /**
268  * Query NIC vport context.
269  * Fills minimal inline attribute.
270  *
271  * @param[in] ctx
272  *   ibv contexts returned from mlx5dv_open_device.
273  * @param[in] vport
274  *   vport index
275  * @param[out] attr
276  *   Attributes device values.
277  *
278  * @return
279  *   0 on success, a negative value otherwise.
280  */
281 static int
282 mlx5_devx_cmd_query_nic_vport_context(void *ctx,
283                                       unsigned int vport,
284                                       struct mlx5_hca_attr *attr)
285 {
286         uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
287         uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
288         void *vctx;
289         int status, syndrome, rc;
290
291         /* Query NIC vport context to determine inline mode. */
292         MLX5_SET(query_nic_vport_context_in, in, opcode,
293                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
294         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
295         if (vport)
296                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
297         rc = mlx5_glue->devx_general_cmd(ctx,
298                                          in, sizeof(in),
299                                          out, sizeof(out));
300         if (rc)
301                 goto error;
302         status = MLX5_GET(query_nic_vport_context_out, out, status);
303         syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
304         if (status) {
305                 DRV_LOG(DEBUG, "Failed to query NIC vport context, "
306                         "status %x, syndrome = %x",
307                         status, syndrome);
308                 return -1;
309         }
310         vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
311                             nic_vport_context);
312         attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
313                                            min_wqe_inline_mode);
314         return 0;
315 error:
316         rc = (rc > 0) ? -rc : rc;
317         return rc;
318 }
319
320 /**
321  * Query NIC vDPA attributes.
322  *
323  * @param[in] ctx
324  *   Context returned from mlx5 open_device() glue function.
325  * @param[out] vdpa_attr
326  *   vDPA Attributes structure to fill.
327  */
328 static void
329 mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
330                                   struct mlx5_hca_vdpa_attr *vdpa_attr)
331 {
332         uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
333         uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
334         void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
335         int status, syndrome, rc;
336
337         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
338         MLX5_SET(query_hca_cap_in, in, op_mod,
339                  MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
340                  MLX5_HCA_CAP_OPMOD_GET_CUR);
341         rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
342         status = MLX5_GET(query_hca_cap_out, out, status);
343         syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
344         if (rc || status) {
345                 RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities,"
346                         " status %x, syndrome = %x", status, syndrome);
347                 vdpa_attr->valid = 0;
348         } else {
349                 vdpa_attr->valid = 1;
350                 vdpa_attr->desc_tunnel_offload_type =
351                         MLX5_GET(virtio_emulation_cap, hcattr,
352                                  desc_tunnel_offload_type);
353                 vdpa_attr->eth_frame_offload_type =
354                         MLX5_GET(virtio_emulation_cap, hcattr,
355                                  eth_frame_offload_type);
356                 vdpa_attr->virtio_version_1_0 =
357                         MLX5_GET(virtio_emulation_cap, hcattr,
358                                  virtio_version_1_0);
359                 vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
360                                                tso_ipv4);
361                 vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
362                                                tso_ipv6);
363                 vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
364                                               tx_csum);
365                 vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
366                                               rx_csum);
367                 vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
368                                                  event_mode);
369                 vdpa_attr->virtio_queue_type =
370                         MLX5_GET(virtio_emulation_cap, hcattr,
371                                  virtio_queue_type);
372                 vdpa_attr->log_doorbell_stride =
373                         MLX5_GET(virtio_emulation_cap, hcattr,
374                                  log_doorbell_stride);
375                 vdpa_attr->log_doorbell_bar_size =
376                         MLX5_GET(virtio_emulation_cap, hcattr,
377                                  log_doorbell_bar_size);
378                 vdpa_attr->doorbell_bar_offset =
379                         MLX5_GET64(virtio_emulation_cap, hcattr,
380                                    doorbell_bar_offset);
381                 vdpa_attr->max_num_virtio_queues =
382                         MLX5_GET(virtio_emulation_cap, hcattr,
383                                  max_num_virtio_queues);
384                 vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
385                                                  umem_1_buffer_param_a);
386                 vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
387                                                  umem_1_buffer_param_b);
388                 vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
389                                                  umem_2_buffer_param_a);
390                 vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
391                                                  umem_2_buffer_param_b);
392                 vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
393                                                  umem_3_buffer_param_a);
394                 vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
395                                                  umem_3_buffer_param_b);
396         }
397 }
398
399 /**
400  * Query HCA attributes.
401  * Using those attributes we can check on run time if the device
402  * is having the required capabilities.
403  *
404  * @param[in] ctx
405  *   Context returned from mlx5 open_device() glue function.
406  * @param[out] attr
407  *   Attributes device values.
408  *
409  * @return
410  *   0 on success, a negative value otherwise.
411  */
412 int
413 mlx5_devx_cmd_query_hca_attr(void *ctx,
414                              struct mlx5_hca_attr *attr)
415 {
416         uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
417         uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
418         void *hcattr;
419         int status, syndrome, rc, i;
420
421         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
422         MLX5_SET(query_hca_cap_in, in, op_mod,
423                  MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
424                  MLX5_HCA_CAP_OPMOD_GET_CUR);
425
426         rc = mlx5_glue->devx_general_cmd(ctx,
427                                          in, sizeof(in), out, sizeof(out));
428         if (rc)
429                 goto error;
430         status = MLX5_GET(query_hca_cap_out, out, status);
431         syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
432         if (status) {
433                 DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
434                         "status %x, syndrome = %x",
435                         status, syndrome);
436                 return -1;
437         }
438         hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
439         attr->flow_counter_bulk_alloc_bitmap =
440                         MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
441         attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
442                                             flow_counters_dump);
443         attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
444                                           log_max_rqt_size);
445         attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
446         attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
447         attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
448                                                 log_max_hairpin_queues);
449         attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
450                                                     log_max_hairpin_wq_data_sz);
451         attr->log_max_hairpin_num_packets = MLX5_GET
452                 (cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
453         attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
454         attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
455                         relaxed_ordering_write);
456         attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
457                         relaxed_ordering_read);
458         attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
459                                           eth_net_offloads);
460         attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
461         attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
462                                                flex_parser_protocols);
463         attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
464         attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
465                                          general_obj_types) &
466                               MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
467         attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
468                                                         general_obj_types) &
469                                   MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
470         attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr,
471                                           wqe_index_ignore_cap);
472         attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd);
473         attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
474         attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
475                                               log_max_static_sq_wq);
476         attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
477                                       device_frequency_khz);
478         attr->regex = MLX5_GET(cmd_hca_cap, hcattr, regexp);
479         attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
480                                                regexp_num_of_engines);
481         if (attr->qos.sup) {
482                 MLX5_SET(query_hca_cap_in, in, op_mod,
483                          MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
484                          MLX5_HCA_CAP_OPMOD_GET_CUR);
485                 rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
486                                                  out, sizeof(out));
487                 if (rc)
488                         goto error;
489                 if (status) {
490                         DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
491                                 " status %x, syndrome = %x",
492                                 status, syndrome);
493                         return -1;
494                 }
495                 hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
496                 attr->qos.srtcm_sup =
497                                 MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
498                 attr->qos.log_max_flow_meter =
499                                 MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
500                 attr->qos.flow_meter_reg_c_ids =
501                                 MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
502                 attr->qos.flow_meter_reg_share =
503                                 MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
504                 attr->qos.packet_pacing =
505                                 MLX5_GET(qos_cap, hcattr, packet_pacing);
506                 attr->qos.wqe_rate_pp =
507                                 MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
508         }
509         if (attr->vdpa.valid)
510                 mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
511         if (!attr->eth_net_offloads)
512                 return 0;
513
514         /* Query HCA offloads for Ethernet protocol. */
515         memset(in, 0, sizeof(in));
516         memset(out, 0, sizeof(out));
517         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
518         MLX5_SET(query_hca_cap_in, in, op_mod,
519                  MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
520                  MLX5_HCA_CAP_OPMOD_GET_CUR);
521
522         rc = mlx5_glue->devx_general_cmd(ctx,
523                                          in, sizeof(in),
524                                          out, sizeof(out));
525         if (rc) {
526                 attr->eth_net_offloads = 0;
527                 goto error;
528         }
529         status = MLX5_GET(query_hca_cap_out, out, status);
530         syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
531         if (status) {
532                 DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
533                         "status %x, syndrome = %x",
534                         status, syndrome);
535                 attr->eth_net_offloads = 0;
536                 return -1;
537         }
538         hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
539         attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
540                                          hcattr, wqe_vlan_insert);
541         attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
542                                  lro_cap);
543         attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
544                                         hcattr, tunnel_lro_gre);
545         attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
546                                           hcattr, tunnel_lro_vxlan);
547         attr->lro_max_msg_sz_mode = MLX5_GET
548                                         (per_protocol_networking_offload_caps,
549                                          hcattr, lro_max_msg_sz_mode);
550         for (i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
551                 attr->lro_timer_supported_periods[i] =
552                         MLX5_GET(per_protocol_networking_offload_caps, hcattr,
553                                  lro_timer_supported_periods[i]);
554         }
555         attr->tunnel_stateless_geneve_rx =
556                             MLX5_GET(per_protocol_networking_offload_caps,
557                                      hcattr, tunnel_stateless_geneve_rx);
558         attr->geneve_max_opt_len =
559                     MLX5_GET(per_protocol_networking_offload_caps,
560                              hcattr, max_geneve_opt_len);
561         attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
562                                          hcattr, wqe_inline_mode);
563         attr->tunnel_stateless_gtp = MLX5_GET
564                                         (per_protocol_networking_offload_caps,
565                                          hcattr, tunnel_stateless_gtp);
566         if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
567                 return 0;
568         if (attr->eth_virt) {
569                 rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
570                 if (rc) {
571                         attr->eth_virt = 0;
572                         goto error;
573                 }
574         }
575         return 0;
576 error:
577         rc = (rc > 0) ? -rc : rc;
578         return rc;
579 }
580
/**
 * Query TIS transport domain from QP verbs object using DevX API.
 *
 * @param[in] qp
 *   Pointer to verbs QP returned by ibv_create_qp .
 * @param[in] tis_num
 *   TIS number of TIS to query.
 * @param[out] tis_td
 *   Pointer to TIS transport domain variable, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
			      uint32_t *tis_td)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
	void *tis_ctx;
	int rc;

	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
	MLX5_SET(query_tis_in, in, tisn, tis_num);
	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
	if (rc != 0) {
		DRV_LOG(ERR, "Failed to query QP using DevX");
		return -rc;
	}
	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
	return 0;
#else
	/* DevX flow support is not compiled in. */
	(void)qp;
	(void)tis_num;
	(void)tis_td;
	return -ENOTSUP;
#endif
}
621
/**
 * Fill WQ data for DevX API command.
 * Utility function for use when creating DevX objects containing a WQ.
 *
 * Copies every field of the attributes structure into the corresponding
 * PRM wq context field; no field is computed or validated here.
 *
 * @param[in] wq_ctx
 *   Pointer to WQ context to fill with data.
 * @param [in] wq_attr
 *   Pointer to WQ attributes structure to fill in WQ context.
 */
static void
devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
{
	/* Basic WQ type and signature/padding configuration. */
	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	/* Doorbell record address and counters. */
	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	/* WQ geometry (all log2 values). */
	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	/* Backing umem references for the doorbell and the WQ buffer. */
	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	/* Hairpin and striding-RQ parameters. */
	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}
664
665 /**
666  * Create RQ using DevX API.
667  *
668  * @param[in] ctx
669  *   Context returned from mlx5 open_device() glue function.
670  * @param [in] rq_attr
671  *   Pointer to create RQ attributes structure.
672  * @param [in] socket
673  *   CPU socket ID for allocations.
674  *
675  * @return
676  *   The DevX object created, NULL otherwise and rte_errno is set.
677  */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rq(void *ctx,
			struct mlx5_devx_create_rq_attr *rq_attr,
			int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rq = NULL;

	/* Allocate the returned handle on the requested NUMA socket. */
	rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
	if (!rq) {
		DRV_LOG(ERR, "Failed to allocate RQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
	/* Copy the caller's RQ attributes into the command mailbox. */
	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	/* Fill the work-queue context embedded in the RQ context. */
	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
						  out, sizeof(out));
	if (!rq->obj) {
		DRV_LOG(ERR, "Failed to create RQ using DevX");
		rte_errno = errno;
		rte_free(rq);
		return NULL;
	}
	/* Firmware returns the RQ number in the command output. */
	rq->id = MLX5_GET(create_rq_out, out, rqn);
	return rq;
}
723
724 /**
725  * Modify RQ using DevX API.
726  *
727  * @param[in] rq
728  *   Pointer to RQ object structure.
729  * @param [in] rq_attr
730  *   Pointer to modify RQ attributes structure.
731  *
732  * @return
733  *   0 on success, a negative errno value otherwise and rte_errno is set.
734  */
int
mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			struct mlx5_devx_modify_rq_attr *rq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	int ret;

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
	MLX5_SET(modify_rq_in, in, rqn, rq->id);
	/* The bitmask tells firmware which context fields are valid. */
	MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
	rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	/* Optional fields are copied only when selected by the bitmask. */
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
		MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
		MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
		MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
		wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
		MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
	}
	ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		/* NOTE(review): DRV_LOG may clobber errno before it is read
		 * below — consider saving errno first; same pattern is used
		 * throughout this file.
		 */
		DRV_LOG(ERR, "Failed to modify RQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
773
774 /**
775  * Create TIR using DevX API.
776  *
777  * @param[in] ctx
778  *  Context returned from mlx5 open_device() glue function.
779  * @param [in] tir_attr
780  *   Pointer to TIR attributes structure.
781  *
782  * @return
783  *   The DevX object created, NULL otherwise and rte_errno is set.
784  */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tir(void *ctx,
			 struct mlx5_devx_tir_attr *tir_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *inner, *rss_key;
	struct mlx5_devx_obj *tir = NULL;

	tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
	if (!tir) {
		DRV_LOG(ERR, "Failed to allocate TIR data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
	/* Dispatch type and LRO configuration. */
	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	/* RSS hash configuration and dispatch target. */
	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	/* Copy the Toeplitz hash key into the context. */
	rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
	memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
	/* Hash field selectors for outer (non-tunneled) headers. */
	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	MLX5_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	/* Hash field selectors for inner (tunneled) headers. */
	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
	MLX5_SET(rx_hash_field_select, inner, selected_fields,
		 tir_attr->rx_hash_field_selector_inner.selected_fields);
	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
						   out, sizeof(out));
	if (!tir->obj) {
		DRV_LOG(ERR, "Failed to create TIR using DevX");
		rte_errno = errno;
		rte_free(tir);
		return NULL;
	}
	/* Firmware returns the TIR number in the command output. */
	tir->id = MLX5_GET(create_tir_out, out, tirn);
	return tir;
}
842
843 /**
844  * Create RQT using DevX API.
845  *
846  * @param[in] ctx
847  *   Context returned from mlx5 open_device() glue function.
848  * @param [in] rqt_attr
849  *   Pointer to RQT attributes structure.
850  *
851  * @return
852  *   The DevX object created, NULL otherwise and rte_errno is set.
853  */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rqt(void *ctx,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t *in = NULL;
	/* Input length is variable: base struct plus one dword per RQ. */
	uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	void *rqt_ctx;
	struct mlx5_devx_obj *rqt = NULL;
	int i;

	in = rte_calloc(__func__, 1, inlen, 0);
	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT IN data");
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
	if (!rqt) {
		DRV_LOG(ERR, "Failed to allocate RQT data");
		rte_errno = ENOMEM;
		rte_free(in);
		return NULL;
	}
	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	/* Copy the RQ number list into the trailing variable-size array. */
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
	/* The mailbox is consumed by the call; free it on both paths. */
	rte_free(in);
	if (!rqt->obj) {
		DRV_LOG(ERR, "Failed to create RQT using DevX");
		rte_errno = errno;
		rte_free(rqt);
		return NULL;
	}
	rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
	return rqt;
}
897
898 /**
899  * Modify RQT using DevX API.
900  *
901  * @param[in] rqt
902  *   Pointer to RQT DevX object structure.
903  * @param [in] rqt_attr
904  *   Pointer to RQT attributes structure.
905  *
906  * @return
907  *   0 on success, a negative errno value otherwise and rte_errno is set.
908  */
int
mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	/* Input length is variable: base struct plus one dword per RQ. */
	uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
	uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
	void *rqt_ctx;
	int i;
	int ret;

	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
	/* Bit 0 selects the RQ list for update — presumably per the PRM
	 * modify-RQT bitmask definition; confirm against the PRM.
	 */
	MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
	rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	/* Copy the new RQ number list into the trailing array. */
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
	rte_free(in);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQT using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}
944
945 /**
946  * Create SQ using DevX API.
947  *
948  * @param[in] ctx
949  *   Context returned from mlx5 open_device() glue function.
950  * @param [in] sq_attr
951  *   Pointer to SQ attributes structure.
952  * @param [in] socket
953  *   CPU socket ID for allocations.
954  *
955  * @return
956  *   The DevX object created, NULL otherwise and rte_errno is set.
 */
958 struct mlx5_devx_obj *
959 mlx5_devx_cmd_create_sq(void *ctx,
960                         struct mlx5_devx_create_sq_attr *sq_attr)
961 {
962         uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
963         uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
964         void *sq_ctx;
965         void *wq_ctx;
966         struct mlx5_devx_wq_attr *wq_attr;
967         struct mlx5_devx_obj *sq = NULL;
968
969         sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
970         if (!sq) {
971                 DRV_LOG(ERR, "Failed to allocate SQ data");
972                 rte_errno = ENOMEM;
973                 return NULL;
974         }
975         MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
976         sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
977         MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
978         MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
979         MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
980         MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
981         MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
982                  sq_attr->flush_in_error_en);
983         MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
984                  sq_attr->min_wqe_inline_mode);
985         MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
986         MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
987         MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
988         MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
989         MLX5_SET(sqc, sq_ctx, non_wire, sq_attr->non_wire);
990         MLX5_SET(sqc, sq_ctx, static_sq_wq, sq_attr->static_sq_wq);
991         MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
992         MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
993         MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
994                  sq_attr->packet_pacing_rate_limit_index);
995         MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
996         MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
997         wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
998         wq_attr = &sq_attr->wq_attr;
999         devx_cmd_fill_wq_data(wq_ctx, wq_attr);
1000         sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1001                                              out, sizeof(out));
1002         if (!sq->obj) {
1003                 DRV_LOG(ERR, "Failed to create SQ using DevX");
1004                 rte_errno = errno;
1005                 rte_free(sq);
1006                 return NULL;
1007         }
1008         sq->id = MLX5_GET(create_sq_out, out, sqn);
1009         return sq;
1010 }
1011
1012 /**
1013  * Modify SQ using DevX API.
1014  *
1015  * @param[in] sq
1016  *   Pointer to SQ object structure.
1017  * @param [in] sq_attr
1018  *   Pointer to SQ attributes structure.
1019  *
1020  * @return
1021  *   0 on success, a negative errno value otherwise and rte_errno is set.
1022  */
1023 int
1024 mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
1025                         struct mlx5_devx_modify_sq_attr *sq_attr)
1026 {
1027         uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
1028         uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
1029         void *sq_ctx;
1030         int ret;
1031
1032         MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
1033         MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
1034         MLX5_SET(modify_sq_in, in, sqn, sq->id);
1035         sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1036         MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
1037         MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
1038         MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
1039         ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
1040                                          out, sizeof(out));
1041         if (ret) {
1042                 DRV_LOG(ERR, "Failed to modify SQ using DevX");
1043                 rte_errno = errno;
1044                 return -errno;
1045         }
1046         return ret;
1047 }
1048
1049 /**
1050  * Create TIS using DevX API.
1051  *
1052  * @param[in] ctx
1053  *   Context returned from mlx5 open_device() glue function.
1054  * @param [in] tis_attr
1055  *   Pointer to TIS attributes structure.
1056  *
1057  * @return
1058  *   The DevX object created, NULL otherwise and rte_errno is set.
1059  */
1060 struct mlx5_devx_obj *
1061 mlx5_devx_cmd_create_tis(void *ctx,
1062                          struct mlx5_devx_tis_attr *tis_attr)
1063 {
1064         uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
1065         uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
1066         struct mlx5_devx_obj *tis = NULL;
1067         void *tis_ctx;
1068
1069         tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
1070         if (!tis) {
1071                 DRV_LOG(ERR, "Failed to allocate TIS object");
1072                 rte_errno = ENOMEM;
1073                 return NULL;
1074         }
1075         MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
1076         tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
1077         MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
1078                  tis_attr->strict_lag_tx_port_affinity);
1079         MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
1080                  tis_attr->strict_lag_tx_port_affinity);
1081         MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
1082         MLX5_SET(tisc, tis_ctx, transport_domain,
1083                  tis_attr->transport_domain);
1084         tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1085                                               out, sizeof(out));
1086         if (!tis->obj) {
1087                 DRV_LOG(ERR, "Failed to create TIS using DevX");
1088                 rte_errno = errno;
1089                 rte_free(tis);
1090                 return NULL;
1091         }
1092         tis->id = MLX5_GET(create_tis_out, out, tisn);
1093         return tis;
1094 }
1095
1096 /**
1097  * Create transport domain using DevX API.
1098  *
1099  * @param[in] ctx
1100  *   Context returned from mlx5 open_device() glue function.
1101  * @return
1102  *   The DevX object created, NULL otherwise and rte_errno is set.
1103  */
1104 struct mlx5_devx_obj *
1105 mlx5_devx_cmd_create_td(void *ctx)
1106 {
1107         uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
1108         uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
1109         struct mlx5_devx_obj *td = NULL;
1110
1111         td = rte_calloc(__func__, 1, sizeof(*td), 0);
1112         if (!td) {
1113                 DRV_LOG(ERR, "Failed to allocate TD object");
1114                 rte_errno = ENOMEM;
1115                 return NULL;
1116         }
1117         MLX5_SET(alloc_transport_domain_in, in, opcode,
1118                  MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
1119         td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1120                                              out, sizeof(out));
1121         if (!td->obj) {
1122                 DRV_LOG(ERR, "Failed to create TIS using DevX");
1123                 rte_errno = errno;
1124                 rte_free(td);
1125                 return NULL;
1126         }
1127         td->id = MLX5_GET(alloc_transport_domain_out, out,
1128                            transport_domain);
1129         return td;
1130 }
1131
1132 /**
1133  * Dump all flows to file.
1134  *
1135  * @param[in] fdb_domain
1136  *   FDB domain.
1137  * @param[in] rx_domain
1138  *   RX domain.
1139  * @param[in] tx_domain
1140  *   TX domain.
1141  * @param[out] file
1142  *   Pointer to file stream.
1143  *
1144  * @return
 *   0 on success, a negative value otherwise.
1146  */
int
mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
			void *rx_domain __rte_unused,
			void *tx_domain __rte_unused, FILE *file __rte_unused)
{
	int ret = 0;

#ifdef HAVE_MLX5_DR_FLOW_DUMP
	/* FDB domain is optional; dump it only when provided. */
	if (fdb_domain) {
		ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
		/*
		 * NOTE(review): early exits return ret unnegated while the
		 * function tail returns -ret; confirm dr_dump_domain's sign
		 * convention and unify if they differ.
		 */
		if (ret)
			return ret;
	}
	/* Rx and Tx domains are mandatory. */
	MLX5_ASSERT(rx_domain);
	ret = mlx5_glue->dr_dump_domain(file, rx_domain);
	if (ret)
		return ret;
	MLX5_ASSERT(tx_domain);
	ret = mlx5_glue->dr_dump_domain(file, tx_domain);
#else
	/* Flow dump is not compiled in without rdma-core support. */
	ret = ENOTSUP;
#endif
	return -ret;
}
1171
1172 /*
1173  * Create CQ using DevX API.
1174  *
1175  * @param[in] ctx
1176  *   Context returned from mlx5 open_device() glue function.
1177  * @param [in] attr
1178  *   Pointer to CQ attributes structure.
1179  *
1180  * @return
1181  *   The DevX object created, NULL otherwise and rte_errno is set.
1182  */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
	struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
						   0);
	void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	if (!cq_obj) {
		DRV_LOG(ERR, "Failed to allocate CQ object memory.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	/* Doorbell record: either a umem offset or a direct address. */
	if (attr->db_umem_valid) {
		MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
		MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
	} else {
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
	}
	MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
	MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
	MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
	MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
	/* The device expects the page size relative to its 4K base page. */
	MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
	MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
	/* CQ buffer supplied through a registered umem, when valid. */
	if (attr->q_umem_valid) {
		MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
		MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
		MLX5_SET64(create_cq_in, in, cq_umem_offset,
			   attr->q_umem_offset);
	}
	cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!cq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
		rte_free(cq_obj);
		return NULL;
	}
	/* Firmware returns the CQ number in the command output. */
	cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
	return cq_obj;
}
1230
1231 /**
1232  * Create VIRTQ using DevX API.
1233  *
1234  * @param[in] ctx
1235  *   Context returned from mlx5 open_device() glue function.
1236  * @param [in] attr
1237  *   Pointer to VIRTQ attributes structure.
1238  *
1239  * @return
1240  *   The DevX object created, NULL otherwise and rte_errno is set.
1241  */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtq(void *ctx,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
						     sizeof(*virtq_obj), 0);
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);

	if (!virtq_obj) {
		DRV_LOG(ERR, "Failed to allocate virtq data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	/* VIRTQ is created through the general-object command interface. */
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	/* Virtio-net queue state and offload flags. */
	MLX5_SET16(virtio_net_q, virtq, hw_available_index,
		   attr->hw_available_index);
	MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
	MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
	MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	/* Generic virtio queue context: version, event mode, ring addresses. */
	MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
		   attr->virtio_version_1_0);
	MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
	MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
	MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
	MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
	MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
	/* The three umems backing the queue object. */
	MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
	MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
	MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
	MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
	MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
	MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
	MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
	MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
	MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id);
	MLX5_SET(virtio_q, virtctx, pd, attr->pd);
	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						    sizeof(out));
	if (!virtq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
		rte_free(virtq_obj);
		return NULL;
	}
	/* Firmware returns the object id in the general-object header. */
	virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return virtq_obj;
}
1303
1304 /**
1305  * Modify VIRTQ using DevX API.
1306  *
1307  * @param[in] virtq_obj
1308  *   Pointer to virtq object structure.
1309  * @param [in] attr
1310  *   Pointer to modify virtq attributes structure.
1311  *
1312  * @return
1313  *   0 on success, a negative errno value otherwise and rte_errno is set.
1314  */
int
mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			   struct mlx5_devx_virtq_attr *attr)
{
	/* Modify reuses the create_virtq_in layout under a modify opcode. */
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	/* attr->type selects which field group is being modified. */
	MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	switch (attr->type) {
	case MLX5_VIRTQ_MODIFY_TYPE_STATE:
		MLX5_SET16(virtio_net_q, virtq, state, attr->state);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
			 attr->dirty_bitmap_mkey);
		MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
			 attr->dirty_bitmap_addr);
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
			 attr->dirty_bitmap_size);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
			 attr->dirty_bitmap_dump_enable);
		break;
	default:
		/* Unknown modify type: reject before issuing the command. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
1362
1363 /**
1364  * Query VIRTQ using DevX API.
1365  *
1366  * @param[in] virtq_obj
1367  *   Pointer to virtq object structure.
1368  * @param [in/out] attr
1369  *   Pointer to virtq attributes structure.
1370  *
1371  * @return
1372  *   0 on success, a negative errno value otherwise and rte_errno is set.
1373  */
1374 int
1375 mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
1376                            struct mlx5_devx_virtq_attr *attr)
1377 {
1378         uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
1379         uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
1380         void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
1381         void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
1382         int ret;
1383
1384         MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
1385                  MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1386         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
1387                  MLX5_GENERAL_OBJ_TYPE_VIRTQ);
1388         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
1389         ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
1390                                          out, sizeof(out));
1391         if (ret) {
1392                 DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
1393                 rte_errno = errno;
1394                 return -errno;
1395         }
1396         attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
1397                                               hw_available_index);
1398         attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
1399         return ret;
1400 }
1401
1402 /**
1403  * Create QP using DevX API.
1404  *
1405  * @param[in] ctx
1406  *   Context returned from mlx5 open_device() glue function.
1407  * @param [in] attr
1408  *   Pointer to QP attributes structure.
1409  *
1410  * @return
1411  *   The DevX object created, NULL otherwise and rte_errno is set.
1412  */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(void *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
						   0);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	/* Only RC (reliable connected) service type QPs are created here. */
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	if (attr->uar_index) {
		/* Regular SW-managed QP: program WQ, CQ, UAR and DB record. */
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		/* Page size is log2, biased by the adapter page shift. */
		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
			 MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->sq_size) {
			/* HW requires power-of-2 queue sizes (log2 encoded). */
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->sq_size));
		} else {
			/* No send queue requested. */
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->rq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			/* RQ stride is log2, biased by the PRM minimum. */
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->rq_size));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			/* No receive queue requested. */
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			/* Doorbell record resides in a registered umem. */
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		/* WQ buffer is supplied through a registered umem as well. */
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ\RQ\CQ\UAR\DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
		rte_free(qp_obj);
		return NULL;
	}
	/* The object ID reported by FW is the QP number. */
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}
1481
1482 /**
1483  * Modify QP using DevX API.
1484  * Currently supports only force loop-back QP.
1485  *
1486  * @param[in] qp
1487  *   Pointer to QP object structure.
1488  * @param [in] qp_st_mod_op
1489  *   The QP state modification operation.
1490  * @param [in] remote_qp_id
1491  *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
1492  *
1493  * @return
1494  *   0 on success, a negative errno value otherwise and rte_errno is set.
1495  */
int
mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
			      uint32_t remote_qp_id)
{
	/*
	 * Each state transition uses its own command layout; a union sized
	 * for the largest one serves as the buffer and only the part that
	 * matches the selected transition is actually sent.
	 */
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
	} in;
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
	} out;
	void *qpc;
	int ret;
	unsigned int inlen;
	unsigned int outlen;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	/*
	 * Setting the opcode through the rst2init layout relies on the
	 * opcode field sitting at the same offset (the common command
	 * header) in all three *_qp_in layouts.
	 */
	MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
	switch (qp_st_mod_op) {
	case MLX5_CMD_OP_RST2INIT_QP:
		MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		/* rre/rwe: remote read/write enable bits. */
		MLX5_SET(qpc, qpc, rre, 1);
		MLX5_SET(qpc, qpc, rwe, 1);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		inlen = sizeof(in.rst2init);
		outlen = sizeof(out.rst2init);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
		/* fl=1: force loop-back, the only mode supported here. */
		MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, mtu, 1);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
		MLX5_SET(qpc, qpc, min_rnr_nak, 0);
		inlen = sizeof(in.init2rtr);
		outlen = sizeof(out.init2rtr);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
		/* Fixed timeout/retry policy for the loop-back QP. */
		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		inlen = sizeof(in.rtr2rts);
		outlen = sizeof(out.rtr2rts);
		break;
	default:
		DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
			qp_st_mod_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify QP using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
1565
1566 struct mlx5_devx_obj *
1567 mlx5_devx_cmd_create_virtio_q_counters(void *ctx)
1568 {
1569         uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
1570         uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
1571         struct mlx5_devx_obj *couners_obj = rte_zmalloc(__func__,
1572                                                        sizeof(*couners_obj), 0);
1573         void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
1574
1575         if (!couners_obj) {
1576                 DRV_LOG(ERR, "Failed to allocate virtio queue counters data.");
1577                 rte_errno = ENOMEM;
1578                 return NULL;
1579         }
1580         MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
1581                  MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
1582         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
1583                  MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
1584         couners_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
1585                                                       sizeof(out));
1586         if (!couners_obj->obj) {
1587                 rte_errno = errno;
1588                 DRV_LOG(ERR, "Failed to create virtio queue counters Obj using"
1589                         " DevX.");
1590                 rte_free(couners_obj);
1591                 return NULL;
1592         }
1593         couners_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
1594         return couners_obj;
1595 }
1596
/**
 * Query virtio queue counters using DevX API.
 *
 * @param[in] couners_obj
 *   Pointer to virtio queue counters object created by
 *   mlx5_devx_cmd_create_virtio_q_counters().
 * @param[in/out] attr
 *   Pointer to virtio queue counters attributes structure filled by
 *   the routine on success.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj,
                                   struct mlx5_devx_virtio_q_couners_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {0};
	/*
	 * NOTE(review): the *_out layout is used to locate hdr inside the
	 * "in" buffer; this is only valid if hdr sits at offset 0 in both
	 * layouts - confirm against mlx5_prm.h.
	 */
	void *hdr = MLX5_ADDR_OF(query_virtio_q_counters_out, in, hdr);
	void *virtio_q_counters = MLX5_ADDR_OF(query_virtio_q_counters_out, out,
					       virtio_q_counters);
	int ret;

	/* Generic query of the counters object by its ID. */
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, couners_obj->id);
	ret = mlx5_glue->devx_obj_query(couners_obj->obj, in, sizeof(in), out,
					sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to query virtio q counters using DevX.");
		rte_errno = errno;
		return -errno;
	}
	/* Copy each HW counter field into the caller's attributes. */
	attr->received_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
					 received_desc);
	attr->completed_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
					  completed_desc);
	attr->error_cqes = MLX5_GET(virtio_q_counters, virtio_q_counters,
				    error_cqes);
	attr->bad_desc_errors = MLX5_GET(virtio_q_counters, virtio_q_counters,
					 bad_desc_errors);
	attr->exceed_max_chain = MLX5_GET(virtio_q_counters, virtio_q_counters,
					  exceed_max_chain);
	attr->invalid_buffer = MLX5_GET(virtio_q_counters, virtio_q_counters,
					invalid_buffer);
	return ret;
}