+ DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
+ dev->data->port_id, 1 << tmpl->rxq.sges_n);
+ if (desc % (1 << tmpl->rxq.sges_n)) {
+ DRV_LOG(ERR,
+ "port %u number of Rx queue descriptors (%u) is not a"
+ " multiple of SGEs per packet (%u)",
+ dev->data->port_id,
+ desc,
+ 1 << tmpl->rxq.sges_n);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ /* Toggle RX checksum offload if hardware supports it. */
+ tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ /* Configure VLAN stripping. */
+ tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ tmpl->rxq.crc_present = 0;
+ } else if (config->hw_fcs_strip) {
+ tmpl->rxq.crc_present = 1;
+ } else {
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been disabled but will"
+ " still be performed by hardware, make sure MLNX_OFED"
+ " and firmware are up to date",
+ dev->data->port_id);
+ tmpl->rxq.crc_present = 0;
+ }
+ DRV_LOG(DEBUG,
+ "port %u CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ dev->data->port_id,
+ tmpl->rxq.crc_present ? "disabled" : "enabled",
+ tmpl->rxq.crc_present << 2);
+ tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
+ (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+ /* Save port ID. */
+ tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->priv = priv;
+ tmpl->rxq.mp = mp;
+ tmpl->rxq.stats.idx = idx;
+ tmpl->rxq.elts_n = log2above(desc);
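+ /* The element array is laid out right after the control structure (tmpl + 1). */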
+ tmpl->rxq.elts =
+ (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+ tmpl->idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ idx, rte_atomic32_read(&tmpl->refcnt));
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
+ return tmpl;
+error:
+ rte_free(tmpl);
+ return NULL;
+}
+
+/**
+ * Get an Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+
+ if ((*priv->rxqs)[idx]) {
+ rxq_ctrl = container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl,
+ rxq);
+ mlx5_rxq_ibv_get(dev, idx);
+ rte_atomic32_inc(&rxq_ctrl->refcnt);
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
+ dev->data->port_id, rxq_ctrl->idx,
+ rte_atomic32_read(&rxq_ctrl->refcnt));
+ }
+ return rxq_ctrl;
+}
+
+/**
+ * Release a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx])
+ return 0;
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+ assert(rxq_ctrl->priv);
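+ /* Release the Verbs Rx queue first; clear the pointer once it is actually freed. */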
+ if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
+ rxq_ctrl->ibv = NULL;
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
+ if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+ LIST_REMOVE(rxq_ctrl, next);
+ rte_free(rxq_ctrl);
+ (*priv->rxqs)[idx] = NULL;
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 1 if the queue can be released, negative errno otherwise and rte_errno is
+ * set.
+ */
+int
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+ return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+}
+
+/**
+ * Verify the Rx queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_rxq_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ int ret = 0;
+
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
+ dev->data->port_id, rxq_ctrl->idx);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Create an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queues
+ * Queues entering the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
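+ /* Indirection tables hold a power-of-two number of WQs; a non-power-of-two queue count is expanded up to ind_table_max_size. */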
+ const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+ struct ibv_wq *wq[1 << wq_n];
+ unsigned int i;
+ unsigned int j;
+
+ ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
+ queues_n * sizeof(uint16_t), 0);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
+
+ if (!rxq)
+ goto error;
+ wq[i] = rxq->ibv->wq;
+ ind_tbl->queues[i] = queues[i];
+ }
+ ind_tbl->queues_n = queues_n;
+ /* Finalise the indirection table: wrap the configured WQs to fill all 2^wq_n entries. */
+ for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
+ wq[i] = wq[j];
+ ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = wq_n,
+ .ind_tbl = wq,
+ .comp_mask = 0,
+ });
+ if (!ind_tbl->ind_table) {
+ rte_errno = errno;
+ goto error;
+ }
+ rte_atomic32_inc(&ind_tbl->refcnt);
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
+ dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
+ rte_atomic32_read(&ind_tbl->refcnt));
+ return ind_tbl;
+error:
+ rte_free(ind_tbl);
+ DRV_LOG(DEBUG, "port %u cannot create indirection table",
+ dev->data->port_id);
+ return NULL;
+}
+
+/**
+ * Get an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queues
+ * Queues entering the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * An indirection table if found, NULL otherwise.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+
+ LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+ if ((ind_tbl->queues_n == queues_n) &&
+ (memcmp(ind_tbl->queues, queues,
+ ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
+ == 0))
+ break;
+ }
+ if (ind_tbl) {
+ unsigned int i;
+
+ rte_atomic32_inc(&ind_tbl->refcnt);
+ DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+ dev->data->port_id, (void *)ind_tbl,
+ rte_atomic32_read(&ind_tbl->refcnt));
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ mlx5_rxq_get(dev, ind_tbl->queues[i]);
+ }
+ return ind_tbl;
+}
+
+/**
+ * Release an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_table
+ * Indirection table to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl)
+{
+ unsigned int i;
+
+ DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+ dev->data->port_id, (void *)ind_tbl,
+ rte_atomic32_read(&ind_tbl->refcnt));
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
+ claim_zero(mlx5_glue->destroy_rwq_ind_table
+ (ind_tbl->ind_table));
+ DEBUG("port %u delete indirection table %p: queues: %u",
+ dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
+ }
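+ /* Drop the reference taken on each Rx queue when the table was created or acquired. */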
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
+ if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+ LIST_REMOVE(ind_tbl, next);
+ rte_free(ind_tbl);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Verify the Verbs indirection table list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ int ret = 0;
+
+ LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+ DRV_LOG(DEBUG,
+ "port %u Verbs indirection table %p still referenced",
+ dev->data->port_id, (void *)ind_tbl);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_key
+ * RSS key for the Rx hash queue.
+ * @param rss_key_len
+ * RSS key length.
+ * @param hash_fields
+ * Verbs protocol hash field to make the RSS on.
+ * @param queues
+ * Queues entering the hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param queues_n
+ * Number of queues.
+ * @param tunnel
+ * Tunnel type, implies tunnel offloading like inner checksum if available.
+ * @param rss_level
+ * RSS hash on tunnel level.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel, uint32_t rss_level)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct ibv_qp *qp;
+ int err;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ struct mlx5dv_qp_init_attr qp_init_attr = {0};
+#endif
+
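+ /* Without hash fields, RSS is not performed and a single queue (the first one) is enough. */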
+ queues_n = hash_fields ? queues_n : 1;
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
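+ /* Fall back to the default RSS key when the caller provides none. */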
+ if (!rss_key_len) {
+ rss_key_len = rss_hash_default_key_len;
+ rss_key = rss_hash_default_key;
+ }
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (tunnel) {
+ qp_init_attr.comp_mask =
+ MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+ qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
+ }
+ qp = mlx5_glue->dv_create_qp
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ rss_hash_default_key_len,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
+ .rx_hash_fields_mask = hash_fields |
+ (tunnel && rss_level > 1 ?
+ (uint32_t)IBV_RX_HASH_INNER : 0),
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd,
+ },
+ &qp_init_attr);
+ DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+ " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
+ " create_flags:0x%x",
+ dev->data->port_id, (void *)qp, (void *)ind_tbl,
+ (tunnel && rss_level > 1 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
+ hash_fields, tunnel, rss_level,
+ qp_init_attr.comp_mask, qp_init_attr.create_flags);
+#else
+ qp = mlx5_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ rss_hash_default_key_len,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
+ .rx_hash_fields_mask = hash_fields,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd,
+ });
+ DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+ " tunnel:0x%x level:%u",
+ dev->data->port_id, (void *)qp, (void *)ind_tbl,
+ hash_fields, tunnel, rss_level);
+#endif
+ if (!qp) {
+ rte_errno = errno;
+ goto error;
+ }
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+ if (!hrxq) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ hrxq->ind_table = ind_tbl;
+ hrxq->qp = qp;
+ hrxq->rss_key_len = rss_key_len;
+ hrxq->hash_fields = hash_fields;
+ hrxq->tunnel = tunnel;
+ hrxq->rss_level = rss_level;
+ memcpy(hrxq->rss_key, rss_key, rss_key_len);
+ rte_atomic32_inc(&hrxq->refcnt);
+ LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
+ return hrxq;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
+ if (qp)
+ claim_zero(mlx5_glue->destroy_qp(qp));
+ rte_errno = err; /* Restore rte_errno. */
+ return NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_conf
+ * RSS configuration for the Rx hash queue.
+ * @param queues
+ * Queues entering the hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param queues_n
+ * Number of queues.
+ * @param tunnel
+ * Tunnel type, implies tunnel offloading like inner checksum if available.
+ * @param rss_level
+ * RSS hash on tunnel level.
+ *
+ * @return
+ * A hash Rx queue on success, NULL otherwise.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel, uint32_t rss_level)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
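+ /* Match the single-queue rule used at creation time when no hash fields are set. */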
+ queues_n = hash_fields ? queues_n : 1;
+ LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ struct mlx5_ind_table_ibv *ind_tbl;
+
+ if (hrxq->rss_key_len != rss_key_len)
+ continue;
+ if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
+ continue;
+ if (hrxq->hash_fields != hash_fields)
+ continue;
+ if (hrxq->tunnel != tunnel)
+ continue;
+ if (hrxq->rss_level != rss_level)
+ continue;
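+ /* The candidate must also use the same indirection table. */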
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ continue;
+ if (ind_tbl != hrxq->ind_table) {
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
+ continue;
+ }
+ rte_atomic32_inc(&hrxq->refcnt);
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
+ return hrxq;
+ }
+ return NULL;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq
+ * Pointer to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+{
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
+ if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
+ " 0x%x, level: %u",
+ dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
+ hrxq->tunnel, hrxq->rss_level);
+ mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
+ LIST_REMOVE(hrxq, next);
+ rte_free(hrxq);
+ return 0;
+ }
+ claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ return 1;
+}
+
+/**
+ * Verify the hash Rx queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ int ret = 0;
+
+ LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ DRV_LOG(DEBUG,
+ "port %u Verbs hash Rx queue %p still referenced",
+ dev->data->port_id, (void *)hrxq);
+ ++ret;
+ }
+ return ret;