The Tx doorbell has a different virtual address in each process.
The secondary process takes the UAR physical page ID of the primary and
mmaps it to its own virtual address.
The primary doorbell references were saved in two shared memory
locations: the TxQ structure and a dedicated doorbell array.
Remove the doorbell reference from the TxQ structure and make the
secondary process take the UAR information from the primary process
doorbell array.
Cc: stable@dpdk.org
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
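
To illustrate the mechanism described above, here is a minimal,
hypothetical sketch of the secondary-side mapping. The names dev_fd,
uar_mmap_off and primary_va are stand-ins for the values the driver
receives from the primary process; this mirrors the rte_mem_map() logic
in the secondary-init hunk below, it is not the driver's exact code.

#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
map_tx_doorbell(int dev_fd, off_t uar_mmap_off, uintptr_t primary_va)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	/* UARs are mapped in whole OS pages. */
	void *page = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
			  dev_fd, uar_mmap_off);

	if (page == MAP_FAILED)
		return NULL;
	/*
	 * Only the page base differs between processes; the doorbell
	 * offset within the page is the same as in the primary.
	 */
	return (uint8_t *)page + (primary_va & (page_size - 1));
}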
}
}
#endif
- txq_ctrl->bf_reg = qp.bf.reg;
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
DRV_LOG(DEBUG, "Port %u: uar_mmap_offset 0x%" PRIx64 ".",
dev->data->port_id, txq_ctrl->uar_mmap_offset);
} else {
DRV_LOG(ERR,
- "Port %u failed to retrieve UAR info, invalid"
- " libmlx5.so",
+ "Port %u failed to retrieve UAR info, invalid libmlx5.so",
dev->data->port_id);
rte_errno = EINVAL;
goto error;
}
- txq_uar_init(txq_ctrl);
+ txq_uar_init(txq_ctrl, qp.bf.reg);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
error:
}
ppriv->uar_table_sz = priv->txqs_n;
dev->process_private = ppriv;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ priv->sh->pppriv = ppriv;
return 0;
}
struct mlx5_devx_obj *td; /* Transport domain. */
struct mlx5_lag lag; /* LAG attributes */
void *tx_uar; /* Tx/packet pacing shared UAR. */
+ struct mlx5_proc_priv *pppriv; /* Primary process private structure. */
struct mlx5_ecpri_parser_profile ecpri_parser;
/* Flex parser profiles information. */
void *devx_rx_uar; /* DevX UAR for Rx. */
int mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
struct mlx5_dev_config *config,
struct rte_device *dpdk_dev);
-int mlx5_dev_configure(struct rte_eth_dev *dev);
int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
struct mlx5_devx_cq_attr cq_attr = {
.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
};
- void *reg_addr;
uint32_t cqe_n, log_desc_n;
uint32_t wqe_n, wqe_size;
int ret = 0;
if (!priv->sh->tdn)
priv->sh->tdn = priv->sh->td->id;
#endif
- MLX5_ASSERT(sh->tx_uar);
- reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
- MLX5_ASSERT(reg_addr);
- txq_ctrl->bf_reg = reg_addr;
+ MLX5_ASSERT(sh->tx_uar && mlx5_os_get_devx_uar_reg_addr(sh->tx_uar));
txq_ctrl->uar_mmap_offset =
mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
- txq_uar_init(txq_ctrl);
+ txq_uar_init(txq_ctrl, mlx5_os_get_devx_uar_reg_addr(sh->tx_uar));
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
error:
struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
struct mlx5_priv *priv; /* Back pointer to private data. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
- void *bf_reg; /* BlueFlame register from Verbs. */
uint16_t dump_file_n; /* Number of dump files. */
struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
uint32_t hairpin_status; /* Hairpin binding status. */
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
+void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl, void *bf_reg);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
*
* @param txq_ctrl
* Pointer to Tx queue control structure.
+ * @param bf_reg
+ *   BlueFlame register address, from the Verbs or DevX UAR.
*/
void
-txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
+txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl, void *bf_reg)
{
struct mlx5_priv *priv = txq_ctrl->priv;
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
return;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
MLX5_ASSERT(ppriv);
- ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
+ ppriv->uar_table[txq_ctrl->txq.idx] = bf_reg;
txq_uar_ncattr_init(txq_ctrl, page_size);
#ifndef RTE_ARCH_64
/* Assign an UAR lock according to UAR page number */
{
struct mlx5_priv *priv = txq_ctrl->priv;
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
+ struct mlx5_proc_priv *primary_ppriv = priv->sh->pppriv;
struct mlx5_txq_data *txq = &txq_ctrl->txq;
void *addr;
uintptr_t uar_va;
* As rdma-core, UARs are mapped in size of OS page
* size. Ref to libmlx5 function: mlx5_init_context()
*/
- uar_va = (uintptr_t)txq_ctrl->bf_reg;
+ uar_va = (uintptr_t)primary_ppriv->uar_table[txq->idx];
offset = uar_va & (page_size - 1); /* Offset in page. */
addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
- fd, txq_ctrl->uar_mmap_offset);
+ fd, txq_ctrl->uar_mmap_offset);
if (!addr) {
- DRV_LOG(ERR,
- "port %u mmap failed for BF reg of txq %u",
+ DRV_LOG(ERR, "Port %u mmap failed for BF reg of txq %u.",
txq->port_id, txq->idx);
rte_errno = ENXIO;
return -rte_errno;
}
addr = RTE_PTR_ADD(addr, offset);
ppriv->uar_table[txq->idx] = addr;
- txq_uar_ncattr_init(txq_ctrl, page_size);
return 0;
}
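
For context, a hypothetical datapath-side sketch (not part of the
patch): once both tables are populated, a doorbell write can go through
the per-process UAR table, so the primary and each secondary use their
own mapping of the same physical page. tx_doorbell_ring is an
illustrative helper, not a driver function.

/*
 * Illustrative helper, assuming the patch-era layout where
 * mlx5_proc_priv holds a per-process void *uar_table[]. Memory
 * barriers and the driver's real write primitives are omitted.
 */
static inline void
tx_doorbell_ring(struct mlx5_proc_priv *ppriv, uint16_t idx,
		 uint64_t db_val)
{
	uint64_t *db = ppriv->uar_table[idx];

	*db = db_val; /* 64-bit write through this process' mapping. */
}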