Query the software parsing offloads supported by the NIC
and save the resulting values in a config parameter.
This is needed for outer IPv4 checksum offload and for
TSO support on IP and UDP tunneled packets.
Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Tested-by: Idan Hackmon <idanhac@nvidia.com>
prf->obj = NULL;
}
+/**
+ * Translate the device SW parsing capabilities into an offloads bitmap.
+ *
+ * @param attr
+ *   Pointer to the queried HCA capability attributes.
+ *
+ * @return
+ *   Bitmap of MLX5_SW_PARSING_*_CAP flags; 0 when SW parsing is not
+ *   supported by the device (attr->swp unset).
+ */
+uint32_t
+mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
+{
+ uint32_t sw_parsing_offloads = 0;
+
+ /* CSUM and TSO caps are only reported when base SW parsing exists. */
+ if (attr->swp) {
+ sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
+ if (attr->swp_csum)
+ sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;
+
+ if (attr->swp_lso)
+ sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
+ }
+ return sw_parsing_offloads;
+}
+
/*
* Allocate Rx and Tx UARs in robust fashion.
* This routine handles the following UAR allocation issues:
struct rte_flow_action_conntrack *profile);
int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_ct_action *ct);
+uint32_t
+mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);
#endif /* RTE_PMD_MLX5_H_ */
device_attr->max_rwq_indirection_table_size =
1 << hca_attr.rss_ind_tbl_cap;
}
+ device_attr->sw_parsing_offloads =
+ mlx5_get_supported_sw_parsing_offloads(&hca_attr);
pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
if (pv_iseg == NULL) {
DRV_LOG(ERR, "Failed to get device hca_iseg");
}
DRV_LOG(DEBUG, "MPW isn't supported");
mlx5_os_get_dev_attr(sh->ctx, &device_attr);
- config->swp = 0;
+ config->swp = device_attr.sw_parsing_offloads &
+ (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
+ MLX5_SW_PARSING_TSO_CAP);
config->ind_table_max_size =
sh->device_attr.max_rwq_indirection_table_size;
cqe_comp = 0;
#define MLX5_NAMESIZE MLX5_FS_NAME_MAX
+/* Bitmap flags describing the SW parsing offloads a device supports. */
+enum mlx5_sw_parsing_offloads {
+ MLX5_SW_PARSING_CAP = 1 << 0, /* Base SW parsing supported. */
+ MLX5_SW_PARSING_CSUM_CAP = 1 << 1, /* SW parsing checksum supported. */
+ MLX5_SW_PARSING_TSO_CAP = 1 << 2, /* SW parsing TSO supported. */
+};
+
#endif /* RTE_PMD_MLX5_OS_H_ */