                .ret = 0,
        };
 
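+       /* Print which mempool is being registered and its chunk count. */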
+       DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+                      "having %u chunks.", dev->data->port_id,
+                      mp->name, mp->nb_mem_chunks);
        rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
        if (data.ret < 0 && rte_errno == ENXIO) {
                /* Mempool may have externally allocated memory. */
 
        DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
                dev->data->port_id, priv->sh->device_attr.max_sge);
        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-               struct rte_mempool *mp;
 
                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-                       /* Pre-register Rx mempool. */
-                       mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-                            rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-                       DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-                               " having %u chunks.", dev->data->port_id,
-                               rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-                       mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+                       /* Pre-register Rx mempools. */
+                       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
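+                               /* Multi-Packet RQ uses one dedicated mempool. */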
+                               mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+                                                 rxq_ctrl->rxq.mprq_mp);
+                       } else {
+                               uint32_t s;
+
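+                               /* Each Rx segment of a split buffer may use
+                                * its own mempool; register them all.
+                                */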
+                               for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+                                       mlx5_mr_update_mp
+                                               (dev, &rxq_ctrl->rxq.mr_ctrl,
+                                                rxq_ctrl->rxq.rxseg[s].mp);
+                       }
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)
                                goto error;