diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 78ec361..35b3ade 100644
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
 #include <rte_ethdev_driver.h>
 #include <rte_common.h>
 
-#include "mlx5_utils.h"
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_common.h>
+#include <mlx5_common_mr.h>
+
 #include "mlx5_defs.h"
+#include "mlx5_utils.h"
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_glue.h"
 
 /**
  * Allocate TX queue elements.
@@ -80,9 +83,9 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
        while (elts_tail != elts_head) {
                struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
 
-               assert(elt != NULL);
+               MLX5_ASSERT(elt != NULL);
                rte_pktmbuf_free_seg(elt);
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX5_DEBUG
                /* Poisoning. */
                memset(&(*elts)[elts_tail & elts_m],
                       0x77,
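
The assert()-to-MLX5_ASSERT() conversion running through this patch swaps the libc assert, which an application's NDEBUG build can silently disable, for a PMD-specific macro gated on RTE_LIBRTE_MLX5_DEBUG (the same flag that now guards the poisoning above). A minimal sketch of such a macro; the real definition lives in mlx5_utils.h and may differ in detail:

#ifdef RTE_LIBRTE_MLX5_DEBUG
#include <assert.h>
/* Debug build of the PMD: evaluate and check the condition. */
#define MLX5_ASSERT(exp) assert(exp)
#else
/* Non-debug build: compile the check out. */
#define MLX5_ASSERT(exp) ((void)0)
#endif
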
@@ -147,27 +150,27 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
+mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (desc <= MLX5_TX_COMP_THRESH) {
+       if (*desc <= MLX5_TX_COMP_THRESH) {
                DRV_LOG(WARNING,
                        "port %u number of descriptors requested for Tx queue"
                        " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
-                       " instead of %u",
-                       dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
-               desc = MLX5_TX_COMP_THRESH + 1;
+                       " instead of %u", dev->data->port_id, idx,
+                       MLX5_TX_COMP_THRESH + 1, *desc);
+               *desc = MLX5_TX_COMP_THRESH + 1;
        }
-       if (!rte_is_power_of_2(desc)) {
-               desc = 1 << log2above(desc);
+       if (!rte_is_power_of_2(*desc)) {
+               *desc = 1 << log2above(*desc);
                DRV_LOG(WARNING,
                        "port %u increased number of descriptors in Tx queue"
                        " %u to the next power of two (%d)",
-                       dev->data->port_id, idx, desc);
+                       dev->data->port_id, idx, *desc);
        }
        DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
-               dev->data->port_id, idx, desc);
+               dev->data->port_id, idx, *desc);
        if (idx >= priv->txqs_n) {
                DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
                        dev->data->port_id, idx, priv->txqs_n);
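
mlx5_tx_queue_pre_setup() now takes desc by pointer because the function may adjust the requested ring size, and both callers must hand the adjusted value on to the queue constructors. A standalone sketch of the normalization with illustrative helper names (the driver itself uses rte_is_power_of_2() and log2above()):

#include <stdbool.h>
#include <stdint.h>

static bool is_pow2(uint32_t v)
{
        return v != 0 && (v & (v - 1)) == 0;
}

static uint32_t next_pow2(uint32_t v)
{
        uint32_t n = 1;

        while (n < v)
                n <<= 1;
        return n;
}

/* The adjusted count must reach the caller, hence the pointer. */
static void normalize_desc(uint16_t *desc, uint16_t comp_thresh)
{
        if (*desc <= comp_thresh)
                *desc = comp_thresh + 1;
        if (!is_pow2(*desc))
                *desc = (uint16_t)next_pow2(*desc);
}
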
@@ -210,7 +213,7 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                container_of(txq, struct mlx5_txq_ctrl, txq);
        int res;
 
-       res = mlx5_tx_queue_pre_setup(dev, idx, desc);
+       res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
        if (res)
                return res;
        txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
@@ -251,7 +254,7 @@ mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
                container_of(txq, struct mlx5_txq_ctrl, txq);
        int res;
 
-       res = mlx5_tx_queue_pre_setup(dev, idx, desc);
+       res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
        if (res)
                return res;
        if (hairpin_conf->peer_count != 1 ||
@@ -295,9 +298,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
        priv = txq_ctrl->priv;
        for (i = 0; (i != priv->txqs_n); ++i)
                if ((*priv->txqs)[i] == txq) {
-                       mlx5_txq_release(ETH_DEV(priv), i);
                        DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
                                PORT_ID(priv), txq->idx);
+                       mlx5_txq_release(ETH_DEV(priv), i);
                        break;
                }
 }
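
The log line moves ahead of mlx5_txq_release() because releasing may drop the last reference and free the queue, after which txq->idx in the message would be read from freed memory. The generic shape of the hazard, as a self-contained sketch (illustrative types, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcnt;
        unsigned int idx;
};

static void obj_release(struct obj *o)
{
        if (--o->refcnt == 0)
                free(o);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcnt = 1;
        o->idx = 7;
        /* Log (read the fields) first... */
        printf("removing Tx queue %u from list\n", o->idx);
        /* ...then release, which may free o. */
        obj_release(o);
        return 0;
}
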
@@ -344,8 +347,8 @@ txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
 
        if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
                return;
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
-       assert(ppriv);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(ppriv);
        ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
        txq_uar_ncattr_init(txq_ctrl, page_size);
 #ifndef RTE_ARCH_64
@@ -383,7 +386,7 @@ txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
 
        if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
                return 0;
-       assert(ppriv);
+       MLX5_ASSERT(ppriv);
        /*
         * As in rdma-core, UARs are mapped in units of the OS page
         * size. Ref to libmlx5 function: mlx5_init_context()
@@ -424,6 +427,30 @@ txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
        munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
 }
 
+/**
+ * Deinitialize Tx UAR registers for secondary process.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_txq_data *txq;
+       struct mlx5_txq_ctrl *txq_ctrl;
+       unsigned int i;
+
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       for (i = 0; i != priv->txqs_n; ++i) {
+               if (!(*priv->txqs)[i])
+                       continue;
+               txq = (*priv->txqs)[i];
+               txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+               txq_uar_uninit_secondary(txq_ctrl);
+       }
+}
+
 /**
  * Initialize Tx UAR registers for secondary process.
  *
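
The new mlx5_tx_uar_uninit_secondary() is the per-queue teardown counterpart of mlx5_tx_uar_init_secondary() below: a secondary process mmap()s each queue's UAR doorbell page into its own address space, so those mappings must be undone queue by queue (for instance before re-attaching to a restarted primary). A reduced sketch of the map/unmap pairing; names and flags are illustrative, not the driver's exact calls:

#include <stdint.h>
#include <sys/mman.h>

/* Map one doorbell page from the device fd at a page-aligned offset. */
static void *uar_map(int fd, off_t uar_offset, size_t page_size)
{
        return mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                    uar_offset & ~(off_t)(page_size - 1));
}

/* Undo it: align the address back down to the page and unmap. */
static void uar_unmap(void *addr, size_t page_size)
{
        munmap((void *)((uintptr_t)addr & ~(uintptr_t)(page_size - 1)),
               page_size);
}
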
@@ -444,7 +471,7 @@ mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
        unsigned int i;
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        for (i = 0; i != priv->txqs_n; ++i) {
                if (!(*priv->txqs)[i])
                        continue;
@@ -452,7 +479,7 @@ mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
                txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
                if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
                        continue;
-               assert(txq->idx == (uint16_t)i);
+               MLX5_ASSERT(txq->idx == (uint16_t)i);
                ret = txq_uar_init_secondary(txq_ctrl, fd);
                if (ret)
                        goto error;
@@ -490,10 +517,10 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct mlx5_devx_create_sq_attr attr = { 0 };
        struct mlx5_txq_obj *tmpl = NULL;
-       int ret = 0;
+       uint32_t max_wq_data;
 
-       assert(txq_data);
-       assert(!txq_ctrl->obj);
+       MLX5_ASSERT(txq_data);
+       MLX5_ASSERT(!txq_ctrl->obj);
        tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
                                 txq_ctrl->socket);
        if (!tmpl) {
@@ -501,39 +528,48 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                        "port %u Tx queue %u cannot allocate memory resources",
                        dev->data->port_id, txq_data->idx);
                rte_errno = ENOMEM;
-               goto error;
+               return NULL;
        }
        tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
        tmpl->txq_ctrl = txq_ctrl;
        attr.hairpin = 1;
        attr.tis_lst_sz = 1;
-       /* Workaround for hairpin startup */
-       attr.wq_attr.log_hairpin_num_packets = log2above(32);
-       /* Workaround for packets larger than 1KB */
-       attr.wq_attr.log_hairpin_data_sz =
-                       priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+       max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+       /* Support jumbo frames (> 9KB) and as many packets as possible. */
+       if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
+               if (priv->config.log_hp_size > max_wq_data) {
+                       DRV_LOG(ERR, "total data size %u power of 2 is "
+                               "too large for hairpin",
+                               priv->config.log_hp_size);
+                       rte_free(tmpl);
+                       rte_errno = ERANGE;
+                       return NULL;
+               }
+               attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
+       } else {
+               attr.wq_attr.log_hairpin_data_sz =
+                               (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
+                                max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
+       }
+       /* Set the number of packets to the maximum for performance. */
+       attr.wq_attr.log_hairpin_num_packets =
+                       attr.wq_attr.log_hairpin_data_sz -
+                       MLX5_HAIRPIN_QUEUE_STRIDE;
        attr.tis_num = priv->sh->tis->id;
        tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
        if (!tmpl->sq) {
                DRV_LOG(ERR,
                        "port %u tx hairpin queue %u can't create sq object",
                        dev->data->port_id, idx);
+               rte_free(tmpl);
                rte_errno = errno;
-               goto error;
+               return NULL;
        }
        DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id,
                idx, (void *)&tmpl);
        rte_atomic32_inc(&tmpl->refcnt);
        LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
        return tmpl;
-error:
-       ret = rte_errno; /* Save rte_errno before cleanup. */
-       if (tmpl->tis)
-               mlx5_devx_cmd_destroy(tmpl->tis);
-       if (tmpl->sq)
-               mlx5_devx_cmd_destroy(tmpl->sq);
-       rte_errno = ret; /* Restore rte_errno. */
-       return NULL;
 }
 
 /**
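
The hairpin sizing above replaces two hard-coded workarounds: log_hairpin_data_sz now comes either from the user-configured log_hp_size (validated against the device's max_wq_data cap) or defaults to the smaller of that cap and MLX5_HAIRPIN_JUMBO_LOG_SIZE, and log_hairpin_num_packets is then derived by subtracting the queue stride, since packets = data size / stride in log2 terms. A worked sketch of the same arithmetic with illustrative constants (not the DPDK values):

#include <stdint.h>
#include <stdio.h>

#define HP_JUMBO_LOG_SIZE 16u   /* illustrative: 2^16 = 64KB of data */
#define HP_QUEUE_STRIDE 6u      /* illustrative: 2^6 = 64B per stride */

static void hairpin_sizes(uint32_t max_wq_data, uint32_t *log_data_sz,
                          uint32_t *log_num_packets)
{
        /* Default: as large as the device allows, capped for jumbo frames. */
        *log_data_sz = max_wq_data < HP_JUMBO_LOG_SIZE ?
                       max_wq_data : HP_JUMBO_LOG_SIZE;
        *log_num_packets = *log_data_sz - HP_QUEUE_STRIDE;
}

int main(void)
{
        uint32_t d, n;

        hairpin_sizes(15, &d, &n); /* device caps data size at 2^15 */
        printf("log_data_sz=%u log_num_packets=%u\n", d, n); /* 15 9 */
        return 0;
}
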
@@ -578,7 +614,7 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
        if (priv->config.devx && !priv->sh->tdn)
                qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
 #endif
-       assert(txq_data);
+       MLX5_ASSERT(txq_data);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
        priv->verbs_alloc_ctx.obj = txq_ctrl;
        if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -609,9 +645,9 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                .cap = {
                        /* Max number of outstanding WRs. */
                        .max_send_wr =
-                               ((priv->sh->device_attr.orig_attr.max_qp_wr <
+                               ((priv->sh->device_attr.max_qp_wr <
                                  desc) ?
-                                priv->sh->device_attr.orig_attr.max_qp_wr :
+                                priv->sh->device_attr.max_qp_wr :
                                 desc),
                        /*
                         * Max number of scatter/gather elements in a WR,
@@ -648,7 +684,7 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                /* Move the QP to this state. */
                .qp_state = IBV_QPS_INIT,
                /* IB device port number. */
-               .port_num = (uint8_t)priv->ibv_port,
+               .port_num = (uint8_t)priv->dev_port,
        };
        ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
                                   (IBV_QP_STATE | IBV_QP_PORT));
@@ -717,13 +753,22 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
        txq_data->cq_db = cq_info.dbrec;
        txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
        txq_data->cq_ci = 0;
-#ifndef NDEBUG
        txq_data->cq_pi = 0;
-#endif
        txq_data->wqe_ci = 0;
        txq_data->wqe_pi = 0;
        txq_data->wqe_comp = 0;
        txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
+       txq_data->fcqs = rte_calloc_socket(__func__,
+                                          txq_data->cqe_s,
+                                          sizeof(*txq_data->fcqs),
+                                          RTE_CACHE_LINE_SIZE,
+                                          txq_ctrl->socket);
+       if (!txq_data->fcqs) {
+               DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
+                       dev->data->port_id, idx);
+               rte_errno = ENOMEM;
+               goto error;
+       }
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        /*
         * If using DevX need to query and store TIS transport domain value.
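
The new fcqs array gives the "free on completion" machinery one slot per CQE; roughly, each entry records how far the elts ring may be freed once the matching completion arrives, so the datapath can release mbufs without rescanning WQEs. A reduced sketch of how such a per-CQE array is consumed (illustrative structure, not the PMD's burst code):

#include <stdint.h>

struct txq_sketch {
        uint16_t *fcqs;     /* per-CQE watermark into the elts ring */
        uint16_t cqe_s;     /* CQ size, a power of two */
        uint16_t cq_ci;     /* completion consumer index */
        uint16_t elts_tail; /* next mbuf slot to free */
};

/* On each completion, free mbufs up to the recorded watermark. */
static void on_completion(struct txq_sketch *q)
{
        uint16_t upto = q->fcqs[q->cq_ci & (q->cqe_s - 1)];

        while (q->elts_tail != upto) {
                /* rte_pktmbuf_free_seg() would go here */
                q->elts_tail++;
        }
        q->cq_ci++;
}
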
@@ -772,6 +817,8 @@ error:
                claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
        if (tmpl.qp)
                claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
+       if (txq_data->fcqs)
+               rte_free(txq_data->fcqs);
        if (txq_obj)
                rte_free(txq_obj);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -818,7 +865,7 @@ mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
 {
-       assert(txq_obj);
+       MLX5_ASSERT(txq_obj);
        if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
                if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
                        if (txq_obj->tis)
@@ -826,6 +873,8 @@ mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
                } else {
                        claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
                        claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
+                       if (txq_obj->txq_ctrl->txq.fcqs)
+                               rte_free(txq_obj->txq_ctrl->txq.fcqs);
                }
                LIST_REMOVE(txq_obj, next);
                rte_free(txq_obj);
@@ -899,7 +948,7 @@ txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
        struct mlx5_priv *priv = txq_ctrl->priv;
        unsigned int wqe_size;
 
-       wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
+       wqe_size = priv->sh->device_attr.max_qp_wr / desc;
        if (!wqe_size)
                return 0;
        /*
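
txq_calc_inline_max() budgets WQE space per descriptor: the device's max_qp_wr (read from the flattened device_attr after this patch) divided by the ring size bounds how many WQE basic blocks one packet may consume, which in turn bounds the inline data length. A rough sketch of that budget with illustrative constants:

#include <stdint.h>

#define WQE_BB_SIZE 64u /* illustrative WQE basic-block size, bytes */

static unsigned int inline_budget(unsigned int max_qp_wr, unsigned int desc,
                                  unsigned int seg_overhead)
{
        unsigned int wqe_size = max_qp_wr / desc; /* WQEBBs per packet */

        if (!wqe_size || wqe_size * WQE_BB_SIZE <= seg_overhead)
                return 0;
        /* Space left for inline data after fixed segments. */
        return wqe_size * WQE_BB_SIZE - seg_overhead;
}
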
@@ -963,7 +1012,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
         * If there is requested minimal amount of data to inline
         * we MUST enable inlining. This is a case for ConnectX-4
         * which usually requires L2 inlined for correct operating
-        * and ConnectX-4LX which requires L2-L4 inlined to
+        * and ConnectX-4 Lx which requires L2-L4 inlined to
         * support E-Switch Flows.
         */
        if (inlen_mode) {
@@ -1034,12 +1083,12 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                 * beginning of inlining buffer in Ethernet
                 * Segment.
                 */
-               assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
-               assert(inlen_send <= MLX5_WQE_SIZE_MAX +
-                                    MLX5_ESEG_MIN_INLINE_SIZE -
-                                    MLX5_WQE_CSEG_SIZE -
-                                    MLX5_WQE_ESEG_SIZE -
-                                    MLX5_WQE_DSEG_SIZE * 2);
+               MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+               MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
+                                         MLX5_ESEG_MIN_INLINE_SIZE -
+                                         MLX5_WQE_CSEG_SIZE -
+                                         MLX5_WQE_ESEG_SIZE -
+                                         MLX5_WQE_DSEG_SIZE * 2);
        } else if (inlen_mode) {
                /*
                 * If minimal inlining is requested we must
@@ -1089,12 +1138,12 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                                PORT_ID(priv), inlen_empw, temp);
                        inlen_empw = temp;
                }
-               assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
-               assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
-                                    MLX5_DSEG_MIN_INLINE_SIZE -
-                                    MLX5_WQE_CSEG_SIZE -
-                                    MLX5_WQE_ESEG_SIZE -
-                                    MLX5_WQE_DSEG_SIZE);
+               MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
+               MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
+                                         MLX5_DSEG_MIN_INLINE_SIZE -
+                                         MLX5_WQE_CSEG_SIZE -
+                                         MLX5_WQE_ESEG_SIZE -
+                                         MLX5_WQE_DSEG_SIZE);
                txq_ctrl->txq.inlen_empw = inlen_empw;
        }
        txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
@@ -1154,7 +1203,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_mode, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_send > max_inline &&
@@ -1166,7 +1215,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_send, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_empw > max_inline &&
@@ -1178,7 +1227,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_empw, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
@@ -1188,7 +1237,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        MLX5_MAX_TSO_HEADER, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_send > max_inline) {
@@ -1209,11 +1258,11 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
        }
        txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
                                            txq_ctrl->txq.inlen_empw);
-       assert(txq_ctrl->max_inline_data <= max_inline);
-       assert(txq_ctrl->txq.inlen_mode <= max_inline);
-       assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
-       assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
-              !txq_ctrl->txq.inlen_empw);
+       MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
+       MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
+       MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
+       MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
+                   !txq_ctrl->txq.inlen_empw);
        return 0;
 error:
        rte_errno = ENOMEM;
@@ -1258,8 +1307,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                goto error;
        }
        /* Save pointer to global generation number to check memory events. */
-       tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
-       assert(desc > MLX5_TX_COMP_THRESH);
+       tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
+       MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
        tmpl->txq.offloads = conf->offloads |
                             dev->data->dev_conf.txmode.offloads;
        tmpl->priv = priv;
@@ -1273,12 +1322,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        if (txq_adjust_params(tmpl))
                goto error;
        if (txq_calc_wqebb_cnt(tmpl) >
-           priv->sh->device_attr.orig_attr.max_qp_wr) {
+           priv->sh->device_attr.max_qp_wr) {
                DRV_LOG(ERR,
                        "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
                        " try smaller queue size",
                        dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                rte_errno = ENOMEM;
                goto error;
        }