X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_trigger.c;h=74c9c0a4fff8c82df12db4cb8d002b02dfb756cd;hb=8e83ba285abe4341b7666927d3fc265b35446c06;hp=e5d74d275f85e99900a75b58ef5da7ae27c9cbbc;hpb=0cedf34da78ff5633184860779c3733c6f8be36b;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index e5d74d275f..74c9c0a4ff 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -105,21 +105,6 @@ error:
 	return -rte_errno;
 }
 
-/**
- * Translate the chunk address to MR key in order to put in into the cache.
- */
-static void
-mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
-			     struct rte_mempool_memhdr *memhdr,
-			     unsigned int idx)
-{
-	struct mlx5_rxq_data *rxq = opaque;
-
-	RTE_SET_USED(mp);
-	RTE_SET_USED(idx);
-	mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
-}
-
 /**
  * Register Rx queue mempools and fill the Rx queue cache.
  * This function tolerates repeated mempool registration.
@@ -133,28 +118,29 @@ mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
 static int
 mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-	struct mlx5_priv *priv = rxq_ctrl->priv;
 	struct rte_mempool *mp;
 	uint32_t s;
 	int ret = 0;
 
 	mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
 	/* MPRQ mempool is registered on creation, just fill the cache. */
-	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-		rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
-				     mlx5_rxq_mempool_register_cb,
-				     &rxq_ctrl->rxq);
-		return 0;
-	}
+	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+		return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+						      rxq_ctrl->rxq.mprq_mp);
 	for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+		bool is_extmem;
+
 		mp = rxq_ctrl->rxq.rxseg[s].mp;
-		ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
-					       priv->sh->cdev->pd, mp,
-					       &priv->mp_id);
+		is_extmem = (rte_pktmbuf_priv_flags(mp) &
+			     RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
+		ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
+					       is_extmem);
 		if (ret < 0 && rte_errno != EEXIST)
 			return ret;
-		rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
-				     &rxq_ctrl->rxq);
+		ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+						     mp);
+		if (ret < 0)
+			return ret;
 	}
 	return 0;
 }
@@ -175,6 +161,39 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
 		mlx5_rxq_release(dev, i);
 }
 
+static int
+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
+		      unsigned int idx)
+{
+	int ret = 0;
+
+	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+		/*
+		 * Pre-register the mempools. Regardless of whether
+		 * the implicit registration is enabled or not,
+		 * Rx mempool destruction is tracked to free MRs.
+		 */
+		if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+			return -rte_errno;
+		ret = rxq_alloc_elts(rxq_ctrl);
+		if (ret)
+			return ret;
+	}
+	MLX5_ASSERT(!rxq_ctrl->obj);
+	rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+				    sizeof(*rxq_ctrl->obj), 0,
+				    rxq_ctrl->socket);
+	if (!rxq_ctrl->obj) {
+		DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
+			dev->data->port_id, idx);
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
+		idx, (void *)&rxq_ctrl->obj);
+	return 0;
+}
+
 /**
  * Start traffic on Rx queues.
  *
@@ -207,38 +226,18 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 		if (rxq == NULL)
 			continue;
 		rxq_ctrl = rxq->ctrl;
-		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/*
-			 * Pre-register the mempools. Regardless of whether
-			 * the implicit registration is enabled or not,
-			 * Rx mempool destruction is tracked to free MRs.
-			 */
-			if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
-				goto error;
-			ret = rxq_alloc_elts(rxq_ctrl);
-			if (ret)
+		if (!rxq_ctrl->started) {
+			if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
 				goto error;
+			LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
 		}
-		MLX5_ASSERT(!rxq_ctrl->obj);
-		rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
-					    sizeof(*rxq_ctrl->obj), 0,
-					    rxq_ctrl->socket);
-		if (!rxq_ctrl->obj) {
-			DRV_LOG(ERR,
-				"Port %u Rx queue %u can't allocate resources.",
-				dev->data->port_id, (*priv->rxqs)[i]->idx);
-			rte_errno = ENOMEM;
-			goto error;
-		}
-		ret = priv->obj_ops.rxq_obj_new(dev, i);
+		ret = priv->obj_ops.rxq_obj_new(rxq);
 		if (ret) {
 			mlx5_free(rxq_ctrl->obj);
 			rxq_ctrl->obj = NULL;
 			goto error;
 		}
-		DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
-			dev->data->port_id, i, (void *)&rxq_ctrl->obj);
-		LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
+		rxq_ctrl->started = true;
 	}
 	return 0;
 error:
@@ -324,7 +323,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 		}
 		rxq_ctrl = rxq->ctrl;
 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
-		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
+		    rxq->hairpin_conf.peers[0].queue != i) {
 			rte_errno = ENOMEM;
 			DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
 				"Rx queue %d", dev->data->port_id,
@@ -354,7 +353,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 		if (ret)
 			goto error;
 		/* Qs with auto-bind will be destroyed directly. */
-		rxq_ctrl->hairpin_status = 1;
+		rxq->hairpin_status = 1;
 		txq_ctrl->hairpin_status = 1;
 		mlx5_txq_release(dev, i);
 	}
@@ -457,9 +456,9 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
 		}
 		peer_info->qp_id = rxq_ctrl->obj->rq->id;
 		peer_info->vhca_id = priv->config.hca_attr.vhca_id;
-		peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
-		peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
-		peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
+		peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
+		peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
+		peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
 	}
 	return 0;
 }
@@ -581,20 +580,20 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			dev->data->port_id, cur_queue);
 		return -rte_errno;
 	}
-	if (rxq_ctrl->hairpin_status != 0) {
+	if (rxq->hairpin_status != 0) {
 		DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
 			dev->data->port_id, cur_queue);
 		return 0;
 	}
 	if (peer_info->tx_explicit !=
-	    rxq_ctrl->hairpin_conf.tx_explicit) {
+	    rxq->hairpin_conf.tx_explicit) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
 			" mismatch", dev->data->port_id, cur_queue);
 		return -rte_errno;
 	}
 	if (peer_info->manual_bind !=
-	    rxq_ctrl->hairpin_conf.manual_bind) {
+	    rxq->hairpin_conf.manual_bind) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
 			" mismatch", dev->data->port_id, cur_queue);
@@ -606,7 +605,7 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 		rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
 		if (ret == 0)
-			rxq_ctrl->hairpin_status = 1;
+			rxq->hairpin_status = 1;
 	}
 	return ret;
 }
@@ -688,7 +687,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			dev->data->port_id, cur_queue);
 		return -rte_errno;
 	}
-	if (rxq_ctrl->hairpin_status == 0) {
+	if (rxq->hairpin_status == 0) {
 		DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
 			dev->data->port_id, cur_queue);
 		return 0;
@@ -703,7 +702,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
 		rq_attr.rq_state = MLX5_SQC_STATE_RST;
 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
 		if (ret == 0)
-			rxq_ctrl->hairpin_status = 0;
+			rxq->hairpin_status = 0;
 	}
 	return ret;
 }
@@ -854,7 +853,7 @@ error:
 
 /*
  * Unbind the hairpin port pair, HW configuration of both devices will be clear
- * and status will be reset for all the queues used between the them.
+ * and status will be reset for all the queues used between them.
  * This function only supports to unbind the Tx from one Rx.
  *
  * @param dev
@@ -1041,7 +1040,7 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			rxq_ctrl = rxq->ctrl;
 			if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
 				continue;
-			pp = rxq_ctrl->hairpin_conf.peers[0].port;
+			pp = rxq->hairpin_conf.peers[0].port;
 			if (pp >= RTE_MAX_ETHPORTS) {
 				rte_errno = ERANGE;
 				DRV_LOG(ERR, "port %hu queue %u peer port "
@@ -1115,6 +1114,24 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
+	if (priv->config.std_delay_drop || priv->config.hp_delay_drop) {
+		if (!priv->config.vf && !priv->config.sf &&
+		    !priv->representor) {
+			ret = mlx5_get_flag_dropless_rq(dev);
+			if (ret < 0)
+				DRV_LOG(WARNING,
+					"port %u cannot query dropless flag",
+					dev->data->port_id);
+			else if (!ret)
+				DRV_LOG(WARNING,
+					"port %u dropless_rq OFF, no rearming",
+					dev->data->port_id);
+		} else {
+			DRV_LOG(DEBUG,
+				"port %u doesn't support dropless_rq flag",
+				dev->data->port_id);
+		}
+	}
 	ret = mlx5_rxq_start(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
@@ -1140,6 +1157,17 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 	mlx5_os_stats_init(dev);
+	/*
+	 * Attach indirection table objects detached on port stop.
+	 * They may be needed to create RSS in non-isolated mode.
+	 */
+	ret = mlx5_action_handle_attach(dev);
+	if (ret) {
+		DRV_LOG(ERR,
+			"port %u failed to attach indirect actions: %s",
+			dev->data->port_id, rte_strerror(rte_errno));
+		goto error;
+	}
 	ret = mlx5_traffic_enable(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u failed to set defaults flows",
@@ -1152,14 +1180,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	mlx5_rxq_timestamp_set(dev);
 	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
 	mlx5_txq_dynf_timestamp_set(dev);
-	/* Attach indirection table objects detached on port stop. */
-	ret = mlx5_action_handle_attach(dev);
-	if (ret) {
-		DRV_LOG(ERR,
-			"port %u failed to attach indirect actions: %s",
-			dev->data->port_id, rte_strerror(rte_errno));
-		goto error;
-	}
 	/*
 	 * In non-cached mode, it only needs to start the default mreg copy
 	 * action and no flow created by application exists anymore.