net/mlx5: switch to the shared protection domain
author Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Wed, 27 Mar 2019 13:15:41 +0000 (13:15 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 29 Mar 2019 16:25:32 +0000 (17:25 +0100)
The PMD code is updated to use the Protection Domain from the
shared IB device context. The PD is shared between all Ethernet
devices belonging to the same multiport InfiniBand device.
If the IB device has only one port, the PD is not actually shared,
because only one Ethernet device is created over it.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_mr.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_txq.c
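
For context, a minimal sketch of the access pattern the patch converges on,
assuming the shared object behind priv->sh is the mlx5_ibv_shared structure
from mlx5.h (only the fields relevant here are shown) and using a hypothetical
register_buffer() helper to illustrate the call path through the shared PD:

#include <infiniband/verbs.h>   /* struct ibv_context, struct ibv_pd, access flags */
#include "mlx5_glue.h"          /* mlx5_glue->reg_mr(), a wrapper around ibv_reg_mr() */

struct mlx5_ibv_shared {                /* shared IB device context (trimmed) */
        struct ibv_context *ctx;        /* Verbs device context. */
        struct ibv_pd *pd;              /* PD shared by all ports of the device. */
        /* ... reference counter, device attributes, etc. ... */
};

struct mlx5_priv {                      /* per-port private data (trimmed) */
        struct mlx5_ibv_shared *sh;     /* Shared IB device context. */
        /* ... per-port fields; the private pd pointer is removed by this patch. */
};

/* Hypothetical helper: memory registration goes through the shared PD,
 * so every port of the same multiport IB device registers memory
 * against one and the same Protection Domain.
 */
static struct ibv_mr *
register_buffer(struct mlx5_priv *priv, void *addr, size_t len)
{
        return mlx5_glue->reg_mr(priv->sh->pd, addr, len,
                                 IBV_ACCESS_LOCAL_WRITE);
}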

drivers/net/mlx5/mlx5.c
index ac6e841..6313824 100644
@@ -1098,7 +1098,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        priv->ctx = sh->ctx;
        priv->ibv_port = spawn->ibv_port;
        priv->device_attr = sh->device_attr;
-       priv->pd = sh->pd;
        priv->mtu = ETHER_MTU;
 #ifndef RTE_ARCH_64
        /* Initialize UAR access locks for 32bit implementations. */
drivers/net/mlx5/mlx5.h
index 56270a6..4213866 100644
@@ -225,7 +225,6 @@ struct mlx5_priv {
        uint32_t ibv_port; /* IB device port number. */
        struct ibv_context *ctx; /* Verbs context. */
        struct ibv_device_attr_ex device_attr; /* Device properties. */
-       struct ibv_pd *pd; /* Protection Domain. */
        struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
        BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
        /* Bit-field of MAC addresses owned by the PMD. */
drivers/net/mlx5/mlx5_mr.c
index 21f8b5e..0f0a64f 100644
@@ -720,7 +720,7 @@ alloc_resources:
         * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
         * through mlx5_alloc_verbs_buf().
         */
-       mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+       mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,
                                       IBV_ACCESS_LOCAL_WRITE);
        if (mr->ibv_mr == NULL) {
                DEBUG("port %u fail to create a verbs MR for address (%p)",
@@ -1138,7 +1138,7 @@ mlx5_create_mr_ext(struct rte_eth_dev *dev, uintptr_t addr, size_t len,
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (mr == NULL)
                return NULL;
-       mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+       mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
                                       IBV_ACCESS_LOCAL_WRITE);
        if (mr->ibv_mr == NULL) {
                DRV_LOG(WARNING,
drivers/net/mlx5/mlx5_rxq.c
index 2f60999..0496c4e 100644
@@ -867,7 +867,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                .max_wr = wqe_n >> rxq_data->sges_n,
                /* Max number of scatter/gather elements in a WR. */
                .max_sge = 1 << rxq_data->sges_n,
-               .pd = priv->pd,
+               .pd = priv->sh->pd,
                .cq = tmpl->cq,
                .comp_mask =
                        IBV_WQ_FLAGS_CVLAN_STRIPPING |
@@ -1831,7 +1831,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                                .rx_hash_fields_mask = hash_fields,
                        },
                        .rwq_ind_tbl = ind_tbl->ind_table,
-                       .pd = priv->pd,
+                       .pd = priv->sh->pd,
                 },
                 &qp_init_attr);
 #else
@@ -1850,7 +1850,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                                .rx_hash_fields_mask = hash_fields,
                        },
                        .rwq_ind_tbl = ind_tbl->ind_table,
-                       .pd = priv->pd,
+                       .pd = priv->sh->pd,
                 });
 #endif
        if (!qp) {
@@ -2006,7 +2006,7 @@ mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
                        .wq_type = IBV_WQT_RQ,
                        .max_wr = 1,
                        .max_sge = 1,
-                       .pd = priv->pd,
+                       .pd = priv->sh->pd,
                        .cq = cq,
                 });
        if (!wq) {
@@ -2160,7 +2160,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
                                .rx_hash_fields_mask = 0,
                                },
                        .rwq_ind_tbl = ind_tbl->ind_table,
-                       .pd = priv->pd
+                       .pd = priv->sh->pd
                 });
        if (!qp) {
                DEBUG("port %u cannot allocate QP for drop queue",
drivers/net/mlx5/mlx5_txq.c
index d185617..d3a5498 100644
@@ -426,7 +426,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                 * Tx burst.
                 */
                .sq_sig_all = 0,
-               .pd = priv->pd,
+               .pd = priv->sh->pd,
                .comp_mask = IBV_QP_INIT_ATTR_PD,
        };
        if (txq_data->max_inline)