+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ /* Remove all processes CQEs. */
+ rxq_sync_cq(rxq);
+ /* Free all involved mbufs. */
+ rxq_free_elts(rxq_ctrl);
+ /* Set the actual queue state. */
+ dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+/**
+ * Rx queue stop. Device queue goes to the RESET state,
+ * all involved mbufs are freed from WQ.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
+{
+	eth_rx_burst_t burst_func = dev->rx_pkt_burst;
+
+	/* Hairpin queues carry no host mbufs, stopping them is refused. */
+	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
+		DRV_LOG(ERR, "Hairpin queue can't be stopped");
+		rte_errno = EINVAL;
+		return -EINVAL;
+	}
+	/* Nothing to do when the queue is stopped already. */
+	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
+		return 0;
+	/*
+	 * The vectorized Rx burst keeps the CQ and RQ indices in
+	 * lockstep; an RQ restart would break that invariant and
+	 * corrupt reception, hence stopping is refused while the
+	 * vectorized datapath is engaged. The burst routine pointer
+	 * is process-local, so the check must be performed here.
+	 */
+	if (burst_func == mlx5_rx_burst_vec) {
+		DRV_LOG(ERR, "Rx queue stop is not supported "
+			"for vectorized Rx");
+		rte_errno = EINVAL;
+		return -EINVAL;
+	}
+	/* A secondary process forwards the request to the primary one. */
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return mlx5_mp_os_req_queue_control(dev, idx,
+						    MLX5_MP_REQ_QUEUE_RX_STOP);
+	return mlx5_rx_queue_stop_primary(dev, idx);
+}
+
+/**
+ * Rx queue start. Device queue goes to the ready state,
+ * all required mbufs are allocated and WQ is replenished.
+ *
+ * Primary-process implementation: refills the WQ with mbufs, resets
+ * the CQ/RQ doorbell records and moves the RQ from RESET to READY.
+ * Must only be called in the primary process (asserted below).
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ /* Allocate needed buffers. */
+ ret = rxq_alloc_elts(rxq_ctrl);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
+ /* NOTE(review): errno is copied to rte_errno here; confirm
+ * rxq_alloc_elts() actually sets errno on failure, otherwise
+ * a stale value may be reported.
+ */
+ rte_errno = errno;
+ return ret;
+ }
+ /* Ensure buffer writes are visible before ringing the CQ doorbell. */
+ rte_io_wmb();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ rte_io_wmb();
+ /* Reset RQ consumer before moving queue to READY state. */
+ *rxq->rq_db = rte_cpu_to_be_32(0);
+ /* Doorbell records must land before the state-change command below. */
+ rte_io_wmb();
+ ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ /* Reinitialize RQ - set WQEs. */
+ mlx5_rxq_initialize(rxq);
+ /* Clear any previous error state so the datapath resumes normally. */
+ rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+ /* Set actual queue state. */
+ dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+}
+
+/**
+ * Rx queue start. Device queue goes to the ready state,
+ * all required mbufs are allocated and WQ is replenished.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
+{
+ int ret;