dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
sizeof(in), out, sizeof(out));
if (!dcs->obj) {
- DRV_LOG(ERR, "Can't allocate counters - error %d\n", errno);
+ DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
rte_errno = errno;
rte_free(dcs);
return NULL;
out_len, async_id,
cmd_comp);
if (rc) {
- DRV_LOG(ERR, "Failed to query devx counters with rc %d\n ", rc);
+ DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
rte_errno = rc;
return -rc;
}
mkey->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
sizeof(out));
if (!mkey->obj) {
- DRV_LOG(ERR, "Can't create mkey - error %d\n", errno);
+ DRV_LOG(ERR, "Can't create mkey - error %d", errno);
rte_errno = errno;
rte_free(mkey);
return NULL;
if (status) {
int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
- DRV_LOG(ERR, "Bad devX status %x, syndrome = %x\n", status,
+ DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
syndrome);
}
return status;
log_max_hairpin_wq_data_sz);
attr->log_max_hairpin_num_packets = MLX5_GET
(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
+ attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
eth_net_offloads);
attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
flex_parser_protocols);
+ attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
+ if (attr->qos.sup) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+ rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (rc)
+ goto error;
+ if (status) {
+ DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
+ " status %x, syndrome = %x",
+ status, syndrome);
+ return -1;
+ }
+ hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ attr->qos.srtcm_sup =
+ MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
+ attr->qos.log_max_flow_meter =
+ MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
+ attr->qos.flow_meter_reg_c_ids =
+ MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
+ }
if (!attr->eth_net_offloads)
return 0;
hcattr, max_geneve_opt_len);
attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
hcattr, wqe_inline_mode);
+ attr->tunnel_stateless_gtp = MLX5_GET
+ (per_protocol_networking_offload_caps,
+ hcattr, tunnel_stateless_gtp);
if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
return 0;
if (attr->eth_virt) {
transport_domain);
return td;
}
+
+/**
+ * Dump all flow rules to a file using the Direct Rules dump API.
+ *
+ * Dumps the FDB (E-Switch) domain first when it exists, then the Rx
+ * domain, then the Tx domain. Stops and returns on the first failure.
+ *
+ * @param[in] sh
+ *   Pointer to the shared IB device context holding the DR domains.
+ * @param[out] file
+ *   Pointer to the file stream the dump is written to.
+ *
+ * @return
+ *   0 on success, a negative value otherwise (-ENOTSUP when rdma-core
+ *   was built without the flow dump API, i.e. HAVE_MLX5_DR_FLOW_DUMP
+ *   is not defined).
+ */
+int
+mlx5_devx_cmd_flow_dump(struct mlx5_ibv_shared *sh __rte_unused,
+			FILE *file __rte_unused)
+{
+	int ret = 0;
+
+#ifdef HAVE_MLX5_DR_FLOW_DUMP
+	/* FDB domain only exists in E-Switch mode; skip it otherwise. */
+	if (sh->fdb_domain) {
+		ret = mlx5_glue->dr_dump_domain(file, sh->fdb_domain);
+		if (ret)
+			return ret;
+	}
+	/* Rx and Tx domains are expected to always exist here. */
+	assert(sh->rx_domain);
+	ret = mlx5_glue->dr_dump_domain(file, sh->rx_domain);
+	if (ret)
+		return ret;
+	assert(sh->tx_domain);
+	ret = mlx5_glue->dr_dump_domain(file, sh->tx_domain);
+#else
+	ret = ENOTSUP;
+#endif
+	/* Glue returns a positive errno value; negate per driver convention. */
+	return -ret;
+}