From: Viacheslav Ovsiienko
Date: Wed, 24 Nov 2021 14:33:16 +0000 (+0200)
Subject: net/mlx5: fix shared Rx queue segment configuration match
X-Git-Url: http://git.droids-corp.org/?p=dpdk.git;a=commitdiff_plain;h=572c9d4bda08555f21ad1f4964a60810c1a23d2d

net/mlx5: fix shared Rx queue segment configuration match

While joining a shared Rx queue to an existing queue group, the queue
configuration is checked to be the same as the one specified at the
creation of the first queue in the group - all shared queues must be
created with identical configurations.

During Rx queue creation the buffer split segment configuration can be
altered: zero segment sizes are substituted with the actual sizes
inherited from the pools, the number of segments can be extended to
cover the maximal packet length, and so on. Hence, the actual queue
segment configuration cannot be used directly to match the
configuration provided in the queue setup call.

To resolve the issue, store the original parameters in the shared queue
structure and perform the check against these stored values.

Fixes: 09c2555303be ("net/mlx5: support shared Rx queue")

Signed-off-by: Viacheslav Ovsiienko
---

diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index f10eee406b..f808bf288f 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -164,6 +164,9 @@ struct mlx5_rxq_ctrl {
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 	uint32_t wqn; /* WQ number. */
+	uint32_t rxseg_n; /* Number of split segment descriptions. */
+	struct rte_eth_rxseg_split rxseg[MLX5_MAX_RXQ_NSEG];
+	/* Saved original buffer split segment configuration. */
 	uint16_t dump_file_n; /* Number of dump files. */
 };

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dadcd0825d..f77d42dedf 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -775,11 +775,14 @@ mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
 			dev->data->port_id, idx);
 		return false;
 	} else if (mp == NULL) {
+		if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
+			DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
+				dev->data->port_id, idx);
+			return false;
+		}
 		for (i = 0; i < conf->rx_nseg; i++) {
-			if (conf->rx_seg[i].split.mp !=
-			    rxq_ctrl->rxq.rxseg[i].mp ||
-			    conf->rx_seg[i].split.length !=
-			    rxq_ctrl->rxq.rxseg[i].length) {
+			if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
+				   sizeof(struct rte_eth_rxseg_split))) {
 				DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
 					dev->data->port_id, idx, i);
 				return false;
@@ -1602,6 +1605,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	rxq->ctrl = tmpl;
 	LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
 	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Save the original segment configuration in the shared queue
+	 * descriptor for the later check on the sibling queue creation.
+	 */
+	tmpl->rxseg_n = n_seg;
+	rte_memcpy(tmpl->rxseg, qs_seg,
+		   sizeof(struct rte_eth_rxseg_split) * n_seg);
 	/*
 	 * Build the array of actual buffer offsets and lengths.
 	 * Pad with the buffers from the last memory pool if
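
Editor's note: the sketch below is not part of the patch; it illustrates, from the
application side, the configuration that mlx5_shared_rxq_match() compares after this
fix. Every sibling queue joining the same share group must pass exactly the same
rx_nseg and rx_seg descriptions it passed for the first queue (including any zero
lengths, since the driver now matches against the saved originals rather than the
adjusted internal segments). The pool pointers, segment length, descriptor count and
share group/queue ids are illustrative assumptions, not values taken from the patch.

/* Minimal sketch: set up one buffer-split Rx queue as a member of shared
 * Rx group 1. Calling this with identical arguments on each sibling port
 * keeps the segment descriptions identical, so the match succeeds. */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_shared_split_rxq(uint16_t port_id, uint16_t queue_id,
		       struct rte_mempool *hdr_pool,
		       struct rte_mempool *pay_pool)
{
	union rte_eth_rxseg segs[2];
	struct rte_eth_rxconf rxconf;

	memset(segs, 0, sizeof(segs));
	/* First segment: packet headers from the small-buffer pool. */
	segs[0].split.mp = hdr_pool;
	segs[0].split.length = 128;
	/* Second segment: length 0 lets the PMD derive the size from the
	 * pool data room; siblings must also pass 0 here to match. */
	segs[1].split.mp = pay_pool;
	segs[1].split.length = 0;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf.rx_seg = segs;
	rxconf.rx_nseg = 2;		/* must be equal on every sibling */
	rxconf.share_group = 1;		/* non-zero: join shared Rx group 1 */
	rxconf.share_qid = queue_id;	/* shared queue index inside group */

	/* mb_pool is NULL because buffer split descriptions are supplied. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, NULL);
}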