/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"
+/* Device parameter to enable padding of Rx packets to cacheline size. */
+#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"
+
/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"
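For context, a minimal sketch of how the new devarg would be enabled at EAL init time; the application name, the `-w` whitelist option, and the PCI address are placeholders, not part of this patch:

```c
#include <rte_eal.h>
#include <rte_common.h>

/* Sketch: pass rxq_pkt_pad_en=1 as a device argument at EAL init.
 * The PCI address 0000:03:00.0 is a placeholder. */
int
main(void)
{
	char *argv[] = {
		"app", "-w", "0000:03:00.0,rxq_pkt_pad_en=1",
	};

	return rte_eal_init(RTE_DIM(argv), argv) < 0 ? -1 : 0;
}
```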
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
- struct priv *priv = data;
+ struct mlx5_priv *priv = data;
void *ret;
size_t alignment = sysconf(_SC_PAGESIZE);
unsigned int socket = SOCKET_ID_ANY;
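The hunk above only renames the private-data type; for readers unfamiliar with the hook, here is a minimal sketch of how such a Verbs buffer allocator wraps rte_malloc_socket() (the socket-selection logic of the real mlx5_alloc_verbs_buf() is omitted, and the function name is illustrative):

```c
#include <unistd.h>
#include <rte_malloc.h>
#include <rte_errno.h>

/* Sketch of a page-aligned, NUMA-aware Verbs allocation hook;
 * the real mlx5_alloc_verbs_buf() also derives "socket" from the
 * queue currently being created. */
static void *
example_alloc_verbs_buf(size_t size, int socket)
{
	size_t alignment = sysconf(_SC_PAGESIZE);
	void *buf = rte_malloc_socket(__func__, size, alignment, socket);

	if (!buf && size)
		rte_errno = ENOMEM;
	return buf;
}
```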
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret;
i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
while (i--) {
- struct priv *opriv =
+ struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
.xstats_get = mlx5_xstats_get,
.xstats_reset = mlx5_xstats_reset,
.xstats_get_names = mlx5_xstats_get_names,
+ .fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
.is_removed = mlx5_is_removed,
};
+/* Available operations from a secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
.xstats_get = mlx5_xstats_get,
.xstats_reset = mlx5_xstats_reset,
.xstats_get_names = mlx5_xstats_get_names,
+ .fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
};
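With .fw_version_get wired into the ops tables, applications can query the firmware string through the generic ethdev API; a minimal usage sketch (the port id and buffer size are arbitrary):

```c
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: retrieve the firmware version exposed by fw_version_get. */
static void
print_fw_version(uint16_t port_id)
{
	char fw_version[64];

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
				       sizeof(fw_version)) == 0)
		printf("port %u firmware: %s\n", port_id, fw_version);
}
```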
-/* Available operators in flow isolated mode. */
+/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
.dev_configure = mlx5_dev_configure,
.dev_start = mlx5_dev_start,
.xstats_get = mlx5_xstats_get,
.xstats_reset = mlx5_xstats_reset,
.xstats_get_names = mlx5_xstats_get_names,
+ .fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
config->cqe_comp = !!tmp;
} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
config->cqe_pad = !!tmp;
+ } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
+ config->hw_padding = !!tmp;
} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
config->mprq.enabled = !!tmp;
} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
MLX5_RXQ_CQE_PAD_EN,
+ MLX5_RXQ_PKT_PAD_EN,
MLX5_RX_MPRQ_EN,
MLX5_RX_MPRQ_LOG_STRIDE_NUM,
MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
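The two hunks above add the new key both to the strcmp chain in the handler and to the valid-keys list; a minimal sketch of how a devargs string reaches such a handler through the rte_kvargs API (everything except the rxq_pkt_pad_en key is illustrative):

```c
#include <errno.h>
#include <rte_kvargs.h>

/* Sketch: parse a devargs string and dispatch "rxq_pkt_pad_en"
 * to a handler with the same signature as mlx5_args_check(). */
static int
example_parse_devargs(const char *devargs,
		      int (*check)(const char *, const char *, void *),
		      void *opaque)
{
	const char *valid[] = { "rxq_pkt_pad_en", NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse(devargs, valid);
	int ret;

	if (kvlist == NULL)
		return -EINVAL;
	ret = rte_kvargs_process(kvlist, "rxq_pkt_pad_en", check, opaque);
	rte_kvargs_free(kvlist);
	return ret;
}
```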
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *addr = (void *)0;
if (uar_base) { /* UAR address space mapped. */
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *addr;
assert(priv->uar_base);
struct ibv_pd *pd = NULL;
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
struct rte_eth_dev *eth_dev = NULL;
- struct priv *priv = NULL;
+ struct mlx5_priv *priv = NULL;
int err = 0;
+ unsigned int hw_padding = 0;
unsigned int mps;
unsigned int cqe_comp;
unsigned int cqe_pad = 0;
}
/* Build device name. */
if (!switch_info->representor)
- rte_strlcpy(name, dpdk_dev->name, sizeof(name));
+ strlcpy(name, dpdk_dev->name, sizeof(name));
else
snprintf(name, sizeof(name), "%s_representor_%u",
dpdk_dev->name, switch_info->port_name);
i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
while (i--) {
- const struct priv *opriv =
+ const struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
IBV_RAW_PACKET_CAP_SCATTER_FCS);
DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
(config.hw_fcs_strip ? "" : "not "));
-#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- config.hw_padding = !!attr.rx_pad_end_addr_align;
+#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
+ hw_padding = !!attr.rx_pad_end_addr_align;
+#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
+ hw_padding = !!(attr.device_cap_flags_ex &
+ IBV_DEVICE_PCI_WRITE_END_PADDING);
#endif
- DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported",
- (config.hw_padding ? "" : "not "));
+ if (config.hw_padding && !hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
+ config.hw_padding = 0;
+ } else if (config.hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
+ }
config.tso = (attr.tso_caps.max_tso > 0 &&
(attr.tso_caps.supported_qpts &
(1 << IBV_QPT_RAW_PACKET)));
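The capability probe above only decides whether the user-requested padding survives into config.hw_padding; a sketch of where that flag would later be applied when creating the Rx work queue, assuming the newer IBV_WQ_FLAGS_PCI_WRITE_END_PADDING create flag is available (the legacy rx_pad_end_addr_align path differs, and the function name is illustrative):

```c
#include <infiniband/verbs.h>

/* Sketch: request end-of-packet padding on an Rx WQ when the
 * negotiated config.hw_padding flag is set. */
static void
request_rx_end_padding(struct ibv_wq_init_attr *wq_attr, int hw_padding)
{
	if (!hw_padding)
		return;
	wq_attr->create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
	wq_attr->comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
```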
priv->config = config;
/* Supported Verbs flow priority number detection. */
err = mlx5_flow_discover_priorities(eth_dev);
- if (err < 0)
+ if (err < 0) {
+ err = -err;
goto error;
+ }
priv->config.flow_prio = err;
/*
* Once the device is added to the list of memory event
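The error-path fix above reflects the sign convention assumed here: mlx5_flow_discover_priorities() returns a negative errno on failure, while the spawn error path expects a positive errno in err. A tiny sketch of the normalization:

```c
/* Sketch: normalize a negative-errno return to the positive errno
 * that the spawn error path stores in "err". */
static int
example_normalize(int ret)
{
	if (ret < 0)
		return -ret;	/* e.g. -ENOTSUP becomes ENOTSUP */
	return 0;		/* success: ret carries a valid value */
}
```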
qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
/* Default configuration. */
dev_config = (struct mlx5_dev_config){
+ .hw_padding = 0,
.mps = MLX5_ARG_UNSET,
.tx_vec_en = 1,
.rx_vec_en = 1,
.id_table = mlx5_pci_id_map,
.probe = mlx5_pci_probe,
.remove = mlx5_pci_remove,
+ .dma_map = mlx5_dma_map,
+ .dma_unmap = mlx5_dma_unmap,
.drv_flags = (RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
RTE_PCI_DRV_PROBE_AGAIN),
};
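The new dma_map/dma_unmap driver ops let the PCI bus forward DMA map/unmap requests for externally allocated memory down to the driver; a sketch of the callback shape these ops follow (the mlx5-specific memory-region registration body is omitted, and the function name is illustrative):

```c
#include <rte_bus_pci.h>

/* Sketch: PCI driver DMA-map callback skeleton. A real
 * implementation would register [addr, addr + len) with the
 * device so that "iova" becomes a valid DMA address for it. */
static int
example_dma_map(struct rte_pci_device *pdev, void *addr,
		uint64_t iova, size_t len)
{
	(void)pdev;
	(void)addr;
	(void)iova;
	(void)len;
	return 0;
}
```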