net/mlx5: change device reference for secondary process
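
Rx queue code reaches the rte_eth_dev through priv->dev, a pointer that
is only valid in the process that stored it. A secondary process maps
its own rte_eth_dev in the per-process rte_eth_devices[] array, so it
must resolve the device through the shared port id instead. Replace
such references with the PORT_ID() and ETH_DEV() helpers, and use the
dev argument directly where one is already at hand.

A minimal sketch of the helpers, assuming the companion mlx5.h change
(not shown in this diff) follows the pattern implied by the call sites
below:

    /* Port id lives in shared dev_data and is valid in any process. */
    #define PORT_ID(priv) ((priv)->dev_data->port_id)
    /* Resolve this process' rte_eth_dev from the shared port id. */
    #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
---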
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 9653d1d..aa1ddd0 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -78,7 +78,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
                buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
                if (buf == NULL) {
                        DRV_LOG(ERR, "port %u empty mbuf pool",
-                               rxq_ctrl->priv->dev->data->port_id);
+                               PORT_ID(rxq_ctrl->priv));
                        rte_errno = ENOMEM;
                        goto error;
                }
@@ -122,7 +122,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
        DRV_LOG(DEBUG,
                "port %u Rx queue %u allocated and configured %u segments"
                " (max %u packets)",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
+               PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
                elts_n / (1 << rxq_ctrl->rxq.sges_n));
        return 0;
 error:
@@ -134,7 +134,7 @@ error:
                (*rxq_ctrl->rxq.elts)[i] = NULL;
        }
        DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+               PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -155,7 +155,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
        uint16_t i;
 
        DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+               PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
        if (rxq->elts == NULL)
                return;
        /**
@@ -186,7 +186,7 @@ void
 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
        DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+               PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
        if (rxq_ctrl->ibv)
                mlx5_rxq_ibv_release(rxq_ctrl->ibv);
        memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -354,11 +354,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)
                return;
        rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
        priv = rxq_ctrl->priv;
-       if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
+       if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
                rte_panic("port %u Rx queue %u is still used by a flow and"
-                         " cannot be removed\n", priv->dev->data->port_id,
-                         rxq_ctrl->idx);
-       mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
+                         " cannot be removed\n",
+                         PORT_ID(priv), rxq_ctrl->idx);
+       mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
 }
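
Note: mlx5_rx_queue_release() is an ethdev callback that only receives
the queue pointer, so the device has to be recovered from priv. A
sanity sketch of the invariant the helpers rely on, assuming the macro
definitions above:

    struct priv *priv = dev->data->dev_private;

    /* Must hold in primary and secondary processes alike. */
    assert(ETH_DEV(priv) == dev);
    assert(PORT_ID(priv) == dev->data->port_id);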
 
 /**
@@ -378,9 +378,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
        unsigned int rxqs_n = priv->rxqs_n;
        unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
        unsigned int count = 0;
-       struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+       struct rte_intr_handle *intr_handle = dev->intr_handle;
 
-       if (!priv->dev->data->dev_conf.intr_conf.rxq)
+       if (!dev->data->dev_conf.intr_conf.rxq)
                return 0;
        mlx5_rx_intr_vec_disable(dev);
        intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
@@ -452,12 +452,12 @@ void
 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
 {
        struct priv *priv = dev->data->dev_private;
-       struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+       struct rte_intr_handle *intr_handle = dev->intr_handle;
        unsigned int i;
        unsigned int rxqs_n = priv->rxqs_n;
        unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
 
-       if (!priv->dev->data->dev_conf.intr_conf.rxq)
+       if (!dev->data->dev_conf.intr_conf.rxq)
                return;
        if (!intr_handle->intr_vec)
                goto free;
@@ -897,7 +897,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
        if (!ret)
                rxq_ibv->mr = NULL;
        DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-               rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
+               PORT_ID(rxq_ibv->rxq_ctrl->priv),
                rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
        if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
                rxq_free_elts(rxq_ibv->rxq_ctrl);
@@ -990,7 +990,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                return NULL;
        }
        tmpl->socket = socket;
-       if (priv->dev->data->dev_conf.intr_conf.rxq)
+       if (dev->data->dev_conf.intr_conf.rxq)
                tmpl->irq = 1;
        /* Enable scattered packets support for this queue if necessary. */
        assert(mb_len >= RTE_PKTMBUF_HEADROOM);
@@ -1259,9 +1259,9 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
        }
        rte_atomic32_inc(&ind_tbl->refcnt);
        LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-       DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-               dev->data->port_id, (void *)ind_tbl,
-               rte_atomic32_read(&ind_tbl->refcnt));
+       DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
+             dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
+             rte_atomic32_read(&ind_tbl->refcnt));
        return ind_tbl;
 error:
        rte_free(ind_tbl);
@@ -1328,11 +1328,14 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
        unsigned int i;
 
        DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-               ((struct priv *)dev->data->dev_private)->port,
-               (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
-       if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+               dev->data->port_id, (void *)ind_tbl,
+               rte_atomic32_read(&ind_tbl->refcnt));
+       if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
                claim_zero(mlx5_glue->destroy_rwq_ind_table
                           (ind_tbl->ind_table));
+               DEBUG("port %u delete indirection table %p: queues: %u",
+                     dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
+       }
        for (i = 0; i != ind_tbl->queues_n; ++i)
                claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
        if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1385,7 +1388,9 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
  * @param queues_n
  *   Number of queues.
  * @param tunnel
- *   Tunnel type.
+ *   Tunnel type; implies tunnel offloads like inner checksum if available.
+ * @param rss_level
+ *   Tunnel level of the RSS hash; a value above 1 hashes on inner headers.
  *
  * @return
  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -1394,13 +1399,17 @@ struct mlx5_hrxq *
 mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
-             const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
+             const uint16_t *queues, uint32_t queues_n,
+             uint32_t tunnel, uint32_t rss_level)
 {
        struct priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
        struct mlx5_ind_table_ibv *ind_tbl;
        struct ibv_qp *qp;
        int err;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       struct mlx5dv_qp_init_attr qp_init_attr = {0};
+#endif
 
        queues_n = hash_fields ? queues_n : 1;
        ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
@@ -1414,6 +1423,43 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                rss_key_len = rss_hash_default_key_len;
                rss_key = rss_hash_default_key;
        }
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       if (tunnel) {
+               qp_init_attr.comp_mask =
+                               MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+               qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
+       }
+       qp = mlx5_glue->dv_create_qp
+               (priv->ctx,
+                &(struct ibv_qp_init_attr_ex){
+                       .qp_type = IBV_QPT_RAW_PACKET,
+                       .comp_mask =
+                               IBV_QP_INIT_ATTR_PD |
+                               IBV_QP_INIT_ATTR_IND_TABLE |
+                               IBV_QP_INIT_ATTR_RX_HASH,
+                       .rx_hash_conf = (struct ibv_rx_hash_conf){
+                               .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+                               .rx_hash_key_len = rss_key_len ? rss_key_len :
+                                                  rss_hash_default_key_len,
+                               .rx_hash_key = rss_key ?
+                                              (void *)(uintptr_t)rss_key :
+                                              rss_hash_default_key,
+                               .rx_hash_fields_mask = hash_fields |
+                                       (tunnel && rss_level > 1 ?
+                                       (uint32_t)IBV_RX_HASH_INNER : 0),
+                       },
+                       .rwq_ind_tbl = ind_tbl->ind_table,
+                       .pd = priv->pd,
+                },
+                &qp_init_attr);
+       DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+             " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
+             " create_flags:0x%x",
+             dev->data->port_id, (void *)qp, (void *)ind_tbl,
+             (tunnel && rss_level > 1 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
+             hash_fields, tunnel, rss_level,
+             qp_init_attr.comp_mask, qp_init_attr.create_flags);
+#else
        qp = mlx5_glue->create_qp_ex
                (priv->ctx,
                 &(struct ibv_qp_init_attr_ex){
@@ -1424,13 +1470,21 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                                IBV_QP_INIT_ATTR_RX_HASH,
                        .rx_hash_conf = (struct ibv_rx_hash_conf){
                                .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
-                               .rx_hash_key_len = rss_key_len,
-                               .rx_hash_key = (void *)(uintptr_t)rss_key,
+                               .rx_hash_key_len = rss_key_len ? rss_key_len :
+                                                  rss_hash_default_key_len,
+                               .rx_hash_key = rss_key ?
+                                              (void *)(uintptr_t)rss_key :
+                                              rss_hash_default_key,
                                .rx_hash_fields_mask = hash_fields,
                        },
                        .rwq_ind_tbl = ind_tbl->ind_table,
                        .pd = priv->pd,
                 });
+       DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+             " tunnel:0x%x level:%hhu",
+             dev->data->port_id, (void *)qp, (void *)ind_tbl,
+             hash_fields, tunnel, rss_level);
+#endif
        if (!qp) {
                rte_errno = errno;
                goto error;
@@ -1443,6 +1497,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        hrxq->rss_key_len = rss_key_len;
        hrxq->hash_fields = hash_fields;
        hrxq->tunnel = tunnel;
+       hrxq->rss_level = rss_level;
        memcpy(hrxq->rss_key, rss_key, rss_key_len);
        rte_atomic32_inc(&hrxq->refcnt);
        LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
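
The new rss_level argument feeds the IBV_RX_HASH_INNER bit above: a
level above 1 on a tunnelled flow hashes on the inner packet headers.
A hypothetical caller sketch (the tunnel value and hash fields are
illustrative; the helper names come from this file):

    struct mlx5_hrxq *hrxq;

    /* Hash tunnelled traffic on its inner IPv4 addresses (level 2). */
    hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
                         rss_hash_default_key_len,
                         IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
                         queues, queues_n,
                         tunnel /* nonzero for encapsulated flows */, 2);
    if (!hrxq)
            hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
                                 rss_hash_default_key_len,
                                 IBV_RX_HASH_SRC_IPV4 |
                                 IBV_RX_HASH_DST_IPV4,
                                 queues, queues_n, tunnel, 2);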
@@ -1472,7 +1527,9 @@ error:
  * @param queues_n
  *   Number of queues.
  * @param tunnel
- *   Tunnel type.
+ *   Tunnel type; implies tunnel offloads like inner checksum if available.
+ * @param rss_level
+ *   Tunnel level of the RSS hash; a value above 1 hashes on inner headers.
  *
  * @return
  *   An hash Rx queue on success.
@@ -1481,7 +1538,8 @@ struct mlx5_hrxq *
 mlx5_hrxq_get(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
-             const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
+             const uint16_t *queues, uint32_t queues_n,
+             uint32_t tunnel, uint32_t rss_level)
 {
        struct priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
@@ -1498,6 +1556,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
                        continue;
                if (hrxq->tunnel != tunnel)
                        continue;
+               if (hrxq->rss_level != rss_level)
+                       continue;
                ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
                if (!ind_tbl)
                        continue;
@@ -1529,10 +1589,14 @@ int
 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
        DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-               ((struct priv *)dev->data->dev_private)->port,
-               (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+               dev->data->port_id, (void *)hrxq,
+               rte_atomic32_read(&hrxq->refcnt));
        if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
                claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+               DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
+                     " 0x%x, level: %u",
+                     dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
+                     hrxq->tunnel, hrxq->rss_level);
                mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
                LIST_REMOVE(hrxq, next);
                rte_free(hrxq);