net/mlx5: support GTP
[dpdk.git] / drivers / net / mlx5 / mlx5_devx_cmds.c
index a501f1f..9985d30 100644 (file)
@@ -40,7 +40,7 @@ mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)
        dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
                                              sizeof(in), out, sizeof(out));
        if (!dcs->obj) {
-               DRV_LOG(ERR, "Can't allocate counters - error %d\n", errno);
+               DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
                rte_errno = errno;
                rte_free(dcs);
                return NULL;
@@ -111,7 +111,7 @@ mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
                                                     out_len, async_id,
                                                     cmd_comp);
        if (rc) {
-               DRV_LOG(ERR, "Failed to query devx counters with rc %d\n ", rc);
+               DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
                rte_errno = rc;
                return -rc;
        }
@@ -171,7 +171,7 @@ mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
        mkey->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
                                               sizeof(out));
        if (!mkey->obj) {
-               DRV_LOG(ERR, "Can't create mkey - error %d\n", errno);
+               DRV_LOG(ERR, "Can't create mkey - error %d", errno);
                rte_errno = errno;
                rte_free(mkey);
                return NULL;
@@ -202,7 +202,7 @@ mlx5_devx_get_out_command_status(void *out)
        if (status) {
                int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
 
-               DRV_LOG(ERR, "Bad devX status %x, syndrome = %x\n", status,
+               DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
                        syndrome);
        }
        return status;
@@ -334,11 +334,35 @@ mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
                                                    log_max_hairpin_wq_data_sz);
        attr->log_max_hairpin_num_packets = MLX5_GET
                (cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
+       attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
        attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
                                          eth_net_offloads);
        attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
        attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
                                               flex_parser_protocols);
+       attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
+       if (attr->qos.sup) {
+               MLX5_SET(query_hca_cap_in, in, op_mod,
+                        MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
+                        MLX5_HCA_CAP_OPMOD_GET_CUR);
+               rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
+                                                out, sizeof(out));
+               if (rc)
+                       goto error;
+               if (status) {
+                       DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
+                               " status %x, syndrome = %x",
+                               status, syndrome);
+                       return -1;
+               }
+               hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+               attr->qos.srtcm_sup =
+                               MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
+               attr->qos.log_max_flow_meter =
+                               MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
+               attr->qos.flow_meter_reg_c_ids =
+                       MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
+       }
        if (!attr->eth_net_offloads)
                return 0;
 
@@ -391,6 +415,9 @@ mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
                             hcattr, max_geneve_opt_len);
        attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
                                         hcattr, wqe_inline_mode);
+       attr->tunnel_stateless_gtp = MLX5_GET
+                                       (per_protocol_networking_offload_caps,
+                                        hcattr, tunnel_stateless_gtp);
        if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return 0;
        if (attr->eth_virt) {
@@ -903,3 +930,38 @@ mlx5_devx_cmd_create_td(struct ibv_context *ctx)
                           transport_domain);
        return td;
 }
+
+/**
+ * Dump all flows to file.
+ *
+ * @param[in] sh
+ *   Pointer to context.
+ * @param[out] file
+ *   Pointer to file stream.
+ *
+ * @return
+ *   0 on success, a nagative value otherwise.
+ */
+int
+mlx5_devx_cmd_flow_dump(struct mlx5_ibv_shared *sh __rte_unused,
+                       FILE *file __rte_unused)
+{
+       int ret = 0;
+
+#ifdef HAVE_MLX5_DR_FLOW_DUMP
+       if (sh->fdb_domain) {
+               ret = mlx5_glue->dr_dump_domain(file, sh->fdb_domain);
+               if (ret)
+                       return ret;
+       }
+       assert(sh->rx_domain);
+       ret = mlx5_glue->dr_dump_domain(file, sh->rx_domain);
+       if (ret)
+               return ret;
+       assert(sh->tx_domain);
+       ret = mlx5_glue->dr_dump_domain(file, sh->tx_domain);
+#else
+       ret = ENOTSUP;
+#endif
+       return -ret;
+}