The code is updated to use the shared IB device context and
device handles. The IB device context is shared between
representors created over a single multiport IB device. All
Verbs and DevX objects are created within this shared context.
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
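For reference, a minimal sketch of the shared-context pattern (not the actual
driver structures; names other than "sh" and "ctx" are illustrative only):
each port/representor private structure keeps a pointer to one shared holder,
and every Verbs/DevX object is created on sh->ctx instead of a per-port
ibv_context.

#include <stdint.h>
#include <infiniband/verbs.h>

/* Illustrative shared-context holder; the real layout lives in the driver. */
struct mlx5_ibv_shared_sketch {
	struct ibv_context *ctx; /* Verbs context shared by all ports. */
	uint32_t refcnt;         /* Ports/representors using this context. */
};

/* Illustrative per-port private data referencing the shared context. */
struct mlx5_priv_sketch {
	struct mlx5_ibv_shared_sketch *sh; /* Shared IB device context. */
	uint32_t ibv_port;                 /* IB port of this representor. */
};

/* Any Verbs object (here a CQ) is then created on the shared context. */
static struct ibv_cq *
sketch_create_cq(struct mlx5_priv_sketch *priv, int cqe_n)
{
	return ibv_create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
}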
DRV_LOG(DEBUG, "port %u closing device \"%s\"",
dev->data->port_id,
- ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+ ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
mlx5_dev_interrupt_handler_uninstall(dev);
mlx5_traffic_disable(dev);
goto error;
}
priv->sh = sh;
priv->ibv_port = spawn->ibv_port;
priv->mtu = ETHER_MTU;
#ifndef RTE_ARCH_64
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_ibv_shared *sh; /* Shared IB device context. */
uint32_t ibv_port; /* IB device port number. */
- struct ibv_context *ctx; /* Verbs context. */
struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
/* Bit-field of MAC addresses owned by the PMD. */
}
/* Read all message and acknowledge them. */
for (;;) {
- if (mlx5_glue->get_async_event(priv->ctx, &event))
+ if (mlx5_glue->get_async_event(priv->sh->ctx, &event))
break;
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct ibv_context *ctx = priv->sh->ctx;
- assert(priv->ctx->async_fd > 0);
- flags = fcntl(priv->ctx->async_fd, F_GETFL);
- ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ assert(ctx->async_fd > 0);
+ flags = fcntl(ctx->async_fd, F_GETFL);
+ ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
DRV_LOG(INFO,
"port %u failed to change file descriptor async event"
}
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rmv) {
- priv->intr_handle.fd = priv->ctx->async_fd;
+ priv->intr_handle.fd = ctx->async_fd;
priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
rte_intr_callback_register(&priv->intr_handle,
mlx5_dev_interrupt_handler, dev);
struct ibv_device_attr device_attr;
struct mlx5_priv *priv = dev->data->dev_private;
- if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+ if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
*cache_resource = *resource;
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_packet_reformat
- (priv->ctx, cache_resource->size,
+ (priv->sh->ctx, cache_resource->size,
(cache_resource->size ? cache_resource->buf : NULL),
cache_resource->reformat_type,
cache_resource->ft_type);
*cache_resource = *resource;
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_modify_header
cache_resource->actions_num *
sizeof(cache_resource->actions[0]),
(uint64_t *)cache_resource->actions,
ret = -ENOMEM;
goto error_exit;
}
- ret = mlx5_devx_cmd_flow_counter_alloc(priv->ctx, dcs);
+ ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
if (ret)
goto error_exit;
struct mlx5_flow_counter tmpl = {
if (matcher->egress)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
+ mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr);
if (!cache_matcher->matcher_object) {
rte_free(cache_matcher);
return rte_flow_error_set(error, ENOMEM,
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
struct mlx5_priv *priv = dev->data->dev_private;
+ struct ibv_context *ctx = priv->sh->ctx;
struct ibv_counter_set_init_attr init = {
.counter_set_id = counter->id};
- counter->cs = mlx5_glue->create_counter_set(priv->ctx, &init);
+ counter->cs = mlx5_glue->create_counter_set(ctx, &init);
if (!counter->cs) {
rte_errno = ENOTSUP;
return -ENOTSUP;
return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
struct mlx5_priv *priv = dev->data->dev_private;
+ struct ibv_context *ctx = priv->sh->ctx;
struct ibv_counters_init_attr init = {0};
struct ibv_counter_attach_attr attach;
int ret;
memset(&attach, 0, sizeof(attach));
- counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
+ counter->cs = mlx5_glue->create_counters(ctx, &init);
if (!counter->cs) {
rte_errno = ENOTSUP;
return -ENOTSUP;
}
tmpl->rxq_ctrl = rxq_ctrl;
if (rxq_ctrl->irq) {
- tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
+ tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
if (!tmpl->channel) {
DRV_LOG(ERR, "port %u: comp channel creation failure",
dev->data->port_id);
}
#endif
tmpl->cq = mlx5_glue->cq_ex_to_cq
- (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
+ (mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv,
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
};
}
- tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
+ tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv,
- tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
+ tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv);
#endif
if (tmpl->wq == NULL) {
DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
wq[i] = wq[j];
ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
&(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = wq_n,
.ind_tbl = wq,
}
#endif
qp = mlx5_glue->dv_create_qp
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
&qp_init_attr);
#else
qp = mlx5_glue->create_qp_ex
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct ibv_context *ctx = priv->sh->ctx;
struct ibv_cq *cq;
struct ibv_wq *wq = NULL;
struct mlx5_rxq_ibv *rxq;
if (priv->drop_queue.rxq)
return priv->drop_queue.rxq;
- cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+ cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
if (!cq) {
DEBUG("port %u cannot allocate CQ for drop queue",
dev->data->port_id);
rte_errno = errno;
goto error;
}
- wq = mlx5_glue->create_wq(priv->ctx,
+ wq = mlx5_glue->create_wq(ctx,
&(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
if (!rxq)
return NULL;
tmpl.ind_table = mlx5_glue->create_rwq_ind_table
&(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
.ind_tbl = &rxq->wq,
ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
if (!ind_tbl)
return NULL;
- qp = mlx5_glue->create_qp_ex(priv->ctx,
+ qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
assert(cmsg != NULL);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
- cmsg->cmsg_len = CMSG_LEN(sizeof(priv->ctx->cmd_fd));
+ cmsg->cmsg_len = CMSG_LEN(sizeof(priv->sh->ctx->cmd_fd));
fd = (int *)CMSG_DATA(cmsg);
- *fd = priv->ctx->cmd_fd;
+ *fd = priv->sh->ctx->cmd_fd;
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
DRV_LOG(WARNING, "port %u cannot send response",
- ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
+ ret = mlx5_tx_uar_remap(dev, priv->sh->ctx->cmd_fd);
if (ret) {
/* Adjust index for rollback. */
i = priv->txqs_n - 1;
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
if (is_empw_burst_func(tx_pkt_burst))
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
+ tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
dev->data->port_id, idx);
attr.init.max_tso_header = txq_ctrl->max_tso_header;
attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
}
- tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
+ tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
if (tmpl.qp == NULL) {
DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
dev->data->port_id, idx);