+uint32_t
+mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
+{
+ uint32_t sw_parsing_offloads = 0;
+
+ if (attr->swp) {
+ sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
+ if (attr->swp_csum)
+ sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;
+
+ if (attr->swp_lso)
+ sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
+ }
+ return sw_parsing_offloads;
+}
+
+uint32_t
+mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
+{
+ uint32_t tn_offloads = 0;
+
+ if (attr->tunnel_stateless_vxlan)
+ tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
+ if (attr->tunnel_stateless_gre)
+ tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
+ if (attr->tunnel_stateless_geneve_rx)
+ tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
+ return tn_offloads;
+}
+
+/* Fill all fields of UAR structure. */
+static int
+mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
+{
+ int ret;
+
+ ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
+ return -rte_errno;
+ }
+ MLX5_ASSERT(sh->tx_uar.obj);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
+ ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
+ mlx5_devx_uar_release(&sh->tx_uar);
+ return -rte_errno;
+ }
+ MLX5_ASSERT(sh->rx_uar.obj);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
+ return 0;
+}
+
/* Release the Rx and Tx DevX UARs prepared by mlx5_rxtx_uars_prepare(). */
static void
mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_devx_uar_release(&sh->rx_uar);
	mlx5_devx_uar_release(&sh->tx_uar);
}
+
+/**
+ * rte_mempool_walk() callback to unregister Rx mempools.
+ * It used when implicit mempool registration is disabled.
+ *
+ * @param mp
+ * The mempool being walked.
+ * @param arg
+ * Pointer to the device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
+}
+
+/**
+ * Callback used when implicit mempool registration is disabled
+ * in order to track Rx mempool destruction.
+ *
+ * @param event
+ * Mempool life cycle event.
+ * @param mp
+ * An Rx mempool registered explicitly when the port is started.
+ * @param arg
+ * Pointer to a device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
+ struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+
+ if (event == RTE_MEMPOOL_EVENT_DESTROY)
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
+}
+
+int
+mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ int ret;
+
+ /* Check if we only need to track Rx mempool destruction. */
+ if (!sh->cdev->config.mr_mempool_reg_en) {
+ ret = rte_mempool_event_callback_register
+ (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
+ return ret == 0 || rte_errno == EEXIST ? 0 : ret;
+ }
+ return mlx5_dev_mempool_subscribe(sh->cdev);
+}
+
+/**
+ * Set up multiple TISs with different affinities according to
+ * number of bonding ports