X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_trigger.c;h=c68b32cf14e72dca833152f78e82c4da78d6502b;hb=61da0fe6c46ab11aeeeb8a9bd939b98ef50eca15;hp=72475e4b5b50b74ae144bc2694ca7a1c22f309a7;hpb=5cf0707fc7e97930ac656a2030608af666165fcf;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 72475e4b5b..c68b32cf14 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -59,7 +59,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
 
 		if (!txq_ctrl)
 			continue;
-		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+		if (!txq_ctrl->is_hairpin)
 			txq_alloc_elts(txq_ctrl);
 		MLX5_ASSERT(!txq_ctrl->obj);
 		txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
@@ -77,7 +77,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
 			txq_ctrl->obj = NULL;
 			goto error;
 		}
-		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
+		if (!txq_ctrl->is_hairpin) {
 			size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
 
 			txq_data->fcqs = mlx5_malloc(flags, size,
@@ -105,21 +105,6 @@ error:
 	return -rte_errno;
 }
 
-/**
- * Translate the chunk address to MR key in order to put in into the cache.
- */
-static void
-mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
-			     struct rte_mempool_memhdr *memhdr,
-			     unsigned int idx)
-{
-	struct mlx5_rxq_data *rxq = opaque;
-
-	RTE_SET_USED(mp);
-	RTE_SET_USED(idx);
-	mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
-}
-
 /**
  * Register Rx queue mempools and fill the Rx queue cache.
  * This function tolerates repeated mempool registration.
@@ -131,31 +116,31 @@ mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
  *   0 on success, (-1) on failure and rte_errno is set.
  */
 static int
-mlx5_rxq_mempool_register(struct rte_eth_dev *dev,
-			  struct mlx5_rxq_ctrl *rxq_ctrl)
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = rxq_ctrl->sh;
 	struct rte_mempool *mp;
 	uint32_t s;
 	int ret = 0;
 
 	mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
 	/* MPRQ mempool is registered on creation, just fill the cache. */
-	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-		rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
-				     mlx5_rxq_mempool_register_cb,
-				     &rxq_ctrl->rxq);
-		return 0;
-	}
+	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+		return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+						      rxq_ctrl->rxq.mprq_mp);
 	for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+		bool is_extmem;
+
 		mp = rxq_ctrl->rxq.rxseg[s].mp;
-		ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache,
-					       sh->cdev->pd, mp, &priv->mp_id);
+		is_extmem = (rte_pktmbuf_priv_flags(mp) &
+			     RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
+		ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
+					       is_extmem);
 		if (ret < 0 && rte_errno != EEXIST)
 			return ret;
-		rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
-				     &rxq_ctrl->rxq);
+		ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+						     mp);
+		if (ret < 0)
+			return ret;
 	}
 	return 0;
 }
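
/*
 * [Editor's note, not part of the patch] The rewritten
 * mlx5_rxq_mempool_register() above no longer walks mempool chunks with a
 * callback; its control flow reduces to the following sketch (error
 * handling elided, names as in the hunk):
 *
 *	mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
 *	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
 *		return mlx5_mr_mempool_populate_cache(mr_ctrl, mprq_mp);
 *	for (each Rx segment mempool mp) {
 *		is_extmem = rte_pktmbuf_priv_flags(mp) &
 *			    RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
 *		mlx5_mr_mempool_register(cdev, mp, is_extmem); (EEXIST is fine)
 *		mlx5_mr_mempool_populate_cache(mr_ctrl, mp);
 *	}
 */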
@@ -176,6 +161,39 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
 		mlx5_rxq_release(dev, i);
 }
 
+static int
+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
+		      unsigned int idx)
+{
+	int ret = 0;
+
+	if (!rxq_ctrl->is_hairpin) {
+		/*
+		 * Pre-register the mempools. Regardless of whether
+		 * the implicit registration is enabled or not,
+		 * Rx mempool destruction is tracked to free MRs.
+		 */
+		if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+			return -rte_errno;
+		ret = rxq_alloc_elts(rxq_ctrl);
+		if (ret)
+			return ret;
+	}
+	MLX5_ASSERT(!rxq_ctrl->obj);
+	rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+				    sizeof(*rxq_ctrl->obj), 0,
+				    rxq_ctrl->socket);
+	if (!rxq_ctrl->obj) {
+		DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
+			dev->data->port_id, idx);
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
+		idx, (void *)&rxq_ctrl->obj);
+	return 0;
+}
+
 /**
  * Start traffic on Rx queues.
  *
@@ -197,10 +215,10 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 		/* Should not release Rx queues but return immediately. */
 		return -rte_errno;
 	}
-	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
-		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
-	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
-		dev->data->port_id, priv->sh->device_attr.max_sge);
+	DRV_LOG(DEBUG, "Port %u dev_cap.max_qp_wr is %d.",
+		dev->data->port_id, priv->sh->dev_cap.max_qp_wr);
+	DRV_LOG(DEBUG, "Port %u dev_cap.max_sge is %d.",
+		dev->data->port_id, priv->sh->dev_cap.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
 		struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
 		struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -208,28 +226,10 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 		if (rxq == NULL)
 			continue;
 		rxq_ctrl = rxq->ctrl;
-		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/*
-			 * Pre-register the mempools. Regardless of whether
-			 * the implicit registration is enabled or not,
-			 * Rx mempool destruction is tracked to free MRs.
-			 */
-			if (mlx5_rxq_mempool_register(dev, rxq_ctrl) < 0)
-				goto error;
-			ret = rxq_alloc_elts(rxq_ctrl);
-			if (ret)
+		if (!rxq_ctrl->started) {
+			if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
 				goto error;
-		}
-		MLX5_ASSERT(!rxq_ctrl->obj);
-		rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
-					    sizeof(*rxq_ctrl->obj), 0,
-					    rxq_ctrl->socket);
-		if (!rxq_ctrl->obj) {
-			DRV_LOG(ERR,
-				"Port %u Rx queue %u can't allocate resources.",
-				dev->data->port_id, i);
-			rte_errno = ENOMEM;
-			goto error;
+			LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
 		}
 		ret = priv->obj_ops.rxq_obj_new(rxq);
 		if (ret) {
@@ -237,9 +237,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 			rxq_ctrl->obj = NULL;
 			goto error;
 		}
-		DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
-			dev->data->port_id, i, (void *)&rxq_ctrl->obj);
-		LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
+		rxq_ctrl->started = true;
 	}
 	return 0;
 error:
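
/*
 * [Editor's note, not part of the patch] mlx5_rxq_ctrl_prepare() pulls the
 * mempool registration, element allocation and queue-object allocation out
 * of the mlx5_rxq_start() loop. Guarded by the new rxq_ctrl->started flag,
 * that per-control work now runs only once, while
 * priv->obj_ops.rxq_obj_new() still runs for every queue referencing the
 * control structure.
 */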
@@ -282,7 +280,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 		txq_ctrl = mlx5_txq_get(dev, i);
 		if (!txq_ctrl)
 			continue;
-		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+		if (!txq_ctrl->is_hairpin ||
 		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
 			mlx5_txq_release(dev, i);
 			continue;
@@ -301,7 +299,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 		if (!txq_ctrl)
 			continue;
 		/* Skip hairpin queues with other peer ports. */
-		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+		if (!txq_ctrl->is_hairpin ||
 		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
 			mlx5_txq_release(dev, i);
 			continue;
@@ -324,7 +322,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 			return -rte_errno;
 		}
 		rxq_ctrl = rxq->ctrl;
-		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
+		if (!rxq_ctrl->is_hairpin ||
 		    rxq->hairpin_conf.peers[0].queue != i) {
 			rte_errno = ENOMEM;
 			DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
@@ -343,14 +341,16 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 		sq_attr.state = MLX5_SQC_STATE_RDY;
 		sq_attr.sq_state = MLX5_SQC_STATE_RST;
 		sq_attr.hairpin_peer_rq = rq->id;
-		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+		sq_attr.hairpin_peer_vhca =
+				priv->sh->cdev->config.hca_attr.vhca_id;
 		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
 		if (ret)
 			goto error;
 		rq_attr.state = MLX5_SQC_STATE_RDY;
 		rq_attr.rq_state = MLX5_SQC_STATE_RST;
 		rq_attr.hairpin_peer_sq = sq->id;
-		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+		rq_attr.hairpin_peer_vhca =
+				priv->sh->cdev->config.hca_attr.vhca_id;
 		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
 		if (ret)
 			goto error;
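
/*
 * [Editor's note, not part of the patch] The auto-bind handshake above is a
 * symmetric pair of DevX state transitions, with the VHCA id now taken from
 * the shared device context (priv->sh->cdev->config) rather than a per-port
 * copy:
 *
 *	SQ: RST -> RDY, hairpin_peer_rq = rq->id, hairpin_peer_vhca = vhca_id
 *	RQ: RST -> RDY, hairpin_peer_sq = sq->id, hairpin_peer_vhca = vhca_id
 */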
@@ -412,7 +412,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
 			dev->data->port_id, peer_queue);
 		return -rte_errno;
 	}
-	if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+	if (!txq_ctrl->is_hairpin) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq",
 			dev->data->port_id, peer_queue);
@@ -427,7 +427,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
 		return -rte_errno;
 	}
 	peer_info->qp_id = txq_ctrl->obj->sq->id;
-	peer_info->vhca_id = priv->config.hca_attr.vhca_id;
+	peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
 	/* 1-to-1 mapping, only the first one is used. */
 	peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
 	peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
@@ -444,7 +444,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
 		return -rte_errno;
 	}
 	rxq_ctrl = rxq->ctrl;
-	if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+	if (!rxq_ctrl->is_hairpin) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
 			dev->data->port_id, peer_queue);
@@ -457,7 +457,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
 		return -rte_errno;
 	}
 	peer_info->qp_id = rxq_ctrl->obj->rq->id;
-	peer_info->vhca_id = priv->config.hca_attr.vhca_id;
+	peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
 	peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
 	peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
 	peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
@@ -510,7 +510,7 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			dev->data->port_id, cur_queue);
 		return -rte_errno;
 	}
-	if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+	if (!txq_ctrl->is_hairpin) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
 			dev->data->port_id, cur_queue);
@@ -570,7 +570,7 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 		return -rte_errno;
 	}
 	rxq_ctrl = rxq->ctrl;
-	if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+	if (!rxq_ctrl->is_hairpin) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
 			dev->data->port_id, cur_queue);
@@ -644,7 +644,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			dev->data->port_id, cur_queue);
 		return -rte_errno;
 	}
-	if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+	if (!txq_ctrl->is_hairpin) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
 			dev->data->port_id, cur_queue);
@@ -683,7 +683,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
 		return -rte_errno;
 	}
 	rxq_ctrl = rxq->ctrl;
-	if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+	if (!rxq_ctrl->is_hairpin) {
 		rte_errno = EINVAL;
 		DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
 			dev->data->port_id, cur_queue);
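
/*
 * [Editor's note, not part of the patch] Every validation hunk in the peer
 * update/bind/unbind paths follows one pattern, collapsing the
 * MLX5_TXQ_TYPE_HAIRPIN and MLX5_RXQ_TYPE_HAIRPIN enum checks into a single
 * boolean:
 *
 *	-	if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
 *	+	if (!txq_ctrl->is_hairpin) {
 *
 * which assumes both mlx5_txq_ctrl and mlx5_rxq_ctrl now carry an
 * is_hairpin flag set once at queue creation.
 */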
@@ -751,7 +751,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
 		txq_ctrl = mlx5_txq_get(dev, i);
 		if (txq_ctrl == NULL)
 			continue;
-		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+		if (!txq_ctrl->is_hairpin) {
 			mlx5_txq_release(dev, i);
 			continue;
 		}
@@ -791,7 +791,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
 		txq_ctrl = mlx5_txq_get(dev, i);
 		if (txq_ctrl == NULL)
 			continue;
-		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+		if (!txq_ctrl->is_hairpin) {
 			mlx5_txq_release(dev, i);
 			continue;
 		}
@@ -819,7 +819,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
 		/* Pass TxQ's information to peer RxQ and try binding. */
 		cur.peer_q = rx_queue;
 		cur.qp_id = txq_ctrl->obj->sq->id;
-		cur.vhca_id = priv->config.hca_attr.vhca_id;
+		cur.vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
 		cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
 		cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
 		/*
@@ -855,7 +855,7 @@ error:
 
 /*
  * Unbind the hairpin port pair, HW configuration of both devices will be clear
- * and status will be reset for all the queues used between the them.
+ * and status will be reset for all the queues used between them.
  * This function only supports to unbind the Tx from one Rx.
 *
 * @param dev
@@ -886,7 +886,7 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
 		txq_ctrl = mlx5_txq_get(dev, i);
 		if (txq_ctrl == NULL)
 			continue;
-		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+		if (!txq_ctrl->is_hairpin) {
 			mlx5_txq_release(dev, i);
 			continue;
 		}
@@ -1016,7 +1016,7 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			txq_ctrl = mlx5_txq_get(dev, i);
 			if (!txq_ctrl)
 				continue;
-			if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+			if (!txq_ctrl->is_hairpin) {
 				mlx5_txq_release(dev, i);
 				continue;
 			}
@@ -1040,7 +1040,7 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			if (rxq == NULL)
 				continue;
 			rxq_ctrl = rxq->ctrl;
-			if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
+			if (!rxq_ctrl->is_hairpin)
 				continue;
 			pp = rxq->hairpin_conf.peers[0].port;
 			if (pp >= RTE_MAX_ETHPORTS) {
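
/*
 * [Editor's note, not part of the patch] The bind/unbind/get_peer_ports
 * callbacks patched above sit behind the public ethdev hairpin API. A
 * minimal application-side sketch, assuming two ports were configured with
 * peered hairpin queues in manual-bind mode:
 */
#include <rte_ethdev.h>

static int
bind_hairpin_pair(uint16_t tx_port, uint16_t rx_port)
{
	uint16_t peers[RTE_MAX_ETHPORTS];
	int n;

	/* List the Rx ports that tx_port's hairpin Tx queues point at. */
	n = rte_eth_hairpin_get_peer_ports(tx_port, peers, RTE_DIM(peers), 1);
	if (n < 0)
		return n;
	/* Bind the Tx side of tx_port to the Rx side of rx_port. */
	return rte_eth_hairpin_bind(tx_port, rx_port);
}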
@@ -1104,8 +1104,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
-	if ((priv->sh->devx && priv->config.dv_flow_en &&
-	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+	if (mlx5_devx_obj_ops_en(priv->sh) &&
+	    priv->obj_ops.lb_dummy_queue_create) {
 		ret = priv->obj_ops.lb_dummy_queue_create(dev);
 		if (ret)
 			goto error;
@@ -1116,6 +1116,24 @@
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
+	if (priv->config.std_delay_drop || priv->config.hp_delay_drop) {
+		if (!priv->sh->dev_cap.vf && !priv->sh->dev_cap.sf &&
+		    !priv->representor) {
+			ret = mlx5_get_flag_dropless_rq(dev);
+			if (ret < 0)
+				DRV_LOG(WARNING,
+					"port %u cannot query dropless flag",
+					dev->data->port_id);
+			else if (!ret)
+				DRV_LOG(WARNING,
+					"port %u dropless_rq OFF, no rearming",
+					dev->data->port_id);
+		} else {
+			DRV_LOG(DEBUG,
+				"port %u doesn't support dropless_rq flag",
+				dev->data->port_id);
+		}
+	}
 	ret = mlx5_rxq_start(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
@@ -1141,6 +1159,17 @@
 		goto error;
 	}
 	mlx5_os_stats_init(dev);
+	/*
+	 * Attach indirection table objects detached on port stop.
+	 * They may be needed to create RSS in non-isolated mode.
+	 */
+	ret = mlx5_action_handle_attach(dev);
+	if (ret) {
+		DRV_LOG(ERR,
+			"port %u failed to attach indirect actions: %s",
+			dev->data->port_id, rte_strerror(rte_errno));
+		goto error;
+	}
 	ret = mlx5_traffic_enable(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u failed to set defaults flows",
@@ -1153,14 +1182,6 @@
 	mlx5_rxq_timestamp_set(dev);
 	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
 	mlx5_txq_dynf_timestamp_set(dev);
-	/* Attach indirection table objects detached on port stop. */
-	ret = mlx5_action_handle_attach(dev);
-	if (ret) {
-		DRV_LOG(ERR,
-			"port %u failed to attach indirect actions: %s",
-			dev->data->port_id, rte_strerror(rte_errno));
-		goto error;
-	}
 	/*
 	 * In non-cached mode, it only needs to start the default mreg copy
 	 * action and no flow created by application exists anymore.
@@ -1186,11 +1207,18 @@
 		priv->sh->port[priv->dev_port - 1].ih_port_id =
 					(uint32_t)dev->data->port_id;
 	} else {
-		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
+		DRV_LOG(INFO, "port %u starts without RMV interrupts.",
 			dev->data->port_id);
-		dev->data->dev_conf.intr_conf.lsc = 0;
 		dev->data->dev_conf.intr_conf.rmv = 0;
 	}
+	if (rte_intr_fd_get(priv->sh->intr_handle_nl) >= 0) {
+		priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
+					(uint32_t)dev->data->port_id;
+	} else {
+		DRV_LOG(INFO, "port %u starts without LSC interrupts.",
+			dev->data->port_id);
+		dev->data->dev_conf.intr_conf.lsc = 0;
+	}
 	if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0)
 		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
 					(uint32_t)dev->data->port_id;
@@ -1225,8 +1253,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 
 	dev->data->dev_started = 0;
 	/* Prevent crashes when queues are still in use. */
-	dev->rx_pkt_burst = removed_rx_burst;
-	dev->tx_pkt_burst = removed_tx_burst;
+	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
 	rte_wmb();
 	/* Disable datapath on secondary process. */
 	mlx5_mp_os_req_stop_rxtx(dev);
@@ -1242,6 +1270,7 @@
 	mlx5_rx_intr_vec_disable(dev);
 	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
+	priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
 	if (priv->obj_ops.lb_dummy_queue_release)
@@ -1255,8 +1284,6 @@
  * Enable traffic flows configured by control plane
  *
  * @param dev
- *   Pointer to Ethernet device private data.
- * @param dev
  *   Pointer to Ethernet device structure.
 *
 * @return
@@ -1299,7 +1326,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
 		if (!txq_ctrl)
 			continue;
 		/* Only Tx implicit mode requires the default Tx flow. */
-		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
+		if (txq_ctrl->is_hairpin &&
 		    txq_ctrl->hairpin_conf.tx_explicit == 0 &&
 		    txq_ctrl->hairpin_conf.peers[0].port ==
 		    priv->dev_data->port_id) {
@@ -1309,8 +1336,7 @@
 				goto error;
 			}
 		}
-		if ((priv->representor || priv->master) &&
-		    priv->config.dv_esw_en) {
+		if (priv->sh->config.dv_esw_en) {
 			if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0) {
 				DRV_LOG(ERR,
					"Port %u Tx queue %u SQ create representor devx default miss rule failed.",
@@ -1320,7 +1346,7 @@
 			}
 			mlx5_txq_release(dev, i);
 		}
-	if ((priv->master || priv->representor) && priv->config.dv_esw_en) {
+	if (priv->sh->config.dv_esw_en) {
 		if (mlx5_flow_create_esw_table_zero_flow(dev))
 			priv->fdb_def_rule = 1;
 		else
@@ -1328,7 +1354,7 @@
 			   " configured - only Eswitch group 0 flows are"
 			   " supported.", dev->data->port_id);
 	}
-	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
+	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
 		ret = mlx5_flow_lacp_miss(dev);
 		if (ret)
 			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
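
/*
 * [Editor's note, not part of the patch] removed_rx_burst/removed_tx_burst
 * were mlx5-private stubs; rte_eth_pkt_burst_dummy() is their generic
 * ethdev replacement (added in DPDK 22.03). It is functionally equivalent
 * to:
 *
 *	uint16_t
 *	rte_eth_pkt_burst_dummy(void *queue __rte_unused,
 *				struct rte_mbuf **pkts __rte_unused,
 *				uint16_t nb_pkts __rte_unused)
 *	{
 *		return 0;
 *	}
 *
 * so any burst still called on a stopped port returns zero packets instead
 * of touching released queue state.
 */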