priv->mtu = RTE_ETHER_MTU;
priv->mp_id.port_id = port_id;
strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
-#ifndef RTE_ARCH_64
- /* Initialize UAR access locks for 32bit implementations. */
- rte_spinlock_init(&priv->uar_lock_cq);
- for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
- rte_spinlock_init(&priv->uar_lock[i]);
-#endif
/* Some internal functions rely on Netlink sockets, open them now. */
priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
err = ENOMEM;
goto error;
}
+#ifndef RTE_ARCH_64
+ /* Initialize UAR access locks for 32bit implementations. */
+ rte_spinlock_init(&sh->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&sh->uar_lock[i]);
+#endif
/*
* Once the device is added to the list of memory event
* callback, its global MR cache table cannot be expanded
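
For context, here is a minimal sketch of what these spinlocks guard; the helper name is hypothetical, though it mirrors the driver's split-write approach for UAR doorbells. On 64-bit CPUs the 8-byte doorbell is a single atomic store, but on 32-bit CPUs it takes two 4-byte stores, so all writers to the same UAR page must serialize. After this change they serialize on the per-device lock in sh rather than on a per-port one.

#include <stdint.h>
#include <rte_io.h>
#include <rte_spinlock.h>

/* Sketch only: 64-bit doorbell write, locked on 32-bit builds. */
static inline void
uar_write64_sketch(uint64_t val, void *addr, rte_spinlock_t *lock)
{
#ifdef RTE_ARCH_64
	(void)lock; /* single atomic 64-bit store, no lock needed */
	*(volatile uint64_t *)addr = val;
#else
	rte_spinlock_lock(lock);
	*(volatile uint32_t *)addr = (uint32_t)val;
	rte_io_wmb(); /* low half must land before the high half */
	*((volatile uint32_t *)addr + 1) = (uint32_t)(val >> 32);
	rte_spinlock_unlock(lock);
#endif
}
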
void *fdb_domain; /* FDB Direct Rules name space handle. */
void *rx_domain; /* RX Direct Rules name space handle. */
void *tx_domain; /* TX Direct Rules name space handle. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t uar_lock_cq; /* All CQs share one dedicated UAR. */
+ rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
+ /* UAR same-page access control required in 32bit implementations. */
+#endif
struct mlx5_hlist *flow_tbls;
/* Direct Rules tables for FDB, NIC TX+RX */
void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
uint8_t mtr_color_reg; /* Meter color match REG_C. */
struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */
struct mlx5_flow_meters flow_meters; /* MTR list. */
-#ifndef RTE_ARCH_64
- rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
- rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
- /* UAR same-page access control required in 32bit implementations. */
-#endif
uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
struct mlx5_mp_id mp_id; /* ID of a multi-process process */
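
The two header hunks above move the locks out of struct mlx5_priv, which exists per port, into struct mlx5_dev_ctx_shared, which is allocated once per Infiniband device. A master port and its representors each have their own priv but point at the same sh, so they now contend on the same lock instance. A toy model of the ownership change, with hypothetical struct names:

#include <rte_spinlock.h>

/* Toy model (names hypothetical): one shared context per device,
 * many per-port privates pointing at it. */
struct toy_dev_ctx_shared {
	rte_spinlock_t uar_lock_cq; /* device-wide, as in sh above */
};
struct toy_priv {
	struct toy_dev_ctx_shared *sh; /* same object for all ports */
};

int
main(void)
{
	struct toy_dev_ctx_shared sh;
	struct toy_priv master = { .sh = &sh };
	struct toy_priv representor = { .sh = &sh };

	rte_spinlock_init(&sh.uar_lock_cq);
	/* Both ports now resolve to one lock; with the old per-priv
	 * fields each port would have locked its own private copy. */
	return &master.sh->uar_lock_cq ==
	       &representor.sh->uar_lock_cq ? 0 : 1;
}
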
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
#ifndef RTE_ARCH_64
- tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
+ tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
#endif
tmpl->rxq.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
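
The Rx hunk only rewires a pointer: each Rx queue keeps a reference to the CQ UAR lock, and it now points into the shared context, so every queue on every port of the device takes the same lock when ringing a CQ doorbell on 32-bit builds. A reduced sketch of that wiring, with toy names:

#include <rte_spinlock.h>

struct toy_shared { rte_spinlock_t uar_lock_cq; };
struct toy_rxq { rte_spinlock_t *uar_lock_cq; };

/* Mirrors the hunk above: the queue borrows the device-wide lock
 * from sh instead of taking a per-port copy from priv. */
static void
toy_rxq_wire(struct toy_rxq *rxq, struct toy_shared *sh)
{
	rxq->uar_lock_cq = &sh->uar_lock_cq;
}
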
/* Assign an UAR lock according to UAR page number */
lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
MLX5_UAR_PAGE_NUM_MASK;
- txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx];
+ txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
#endif
}
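
Tx queues, unlike CQs, hash their UAR page onto a small array of locks. The worked example below shows how the computation above buckets doorbells by page; the constants are assumptions for illustration (MLX5_UAR_PAGE_NUM_MAX taken as 64 with a mask of MAX - 1, and a 4 KiB page). Queues on the same page, or on pages that alias under the mask, end up sharing one lock.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define UAR_PAGE_NUM_MAX  64 /* assumed value for illustration */
#define UAR_PAGE_NUM_MASK (UAR_PAGE_NUM_MAX - 1)

int
main(void)
{
	const uint64_t page_size = 4096;
	const uint64_t offsets[] = { 0x0000, 0x1000, 0x1800, 0x41000 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int lock_idx =
			(offsets[i] / page_size) & UAR_PAGE_NUM_MASK;
		/* 0x1000 and 0x1800 sit on page 1 -> same lock;
		 * 0x41000 is page 65, aliasing back to lock 1. */
		printf("offset 0x%05" PRIx64 " -> lock %u\n",
		       offsets[i], lock_idx);
	}
	return 0;
}
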