From: Nélio Laranjeiro
Date: Fri, 24 Jun 2016 13:17:45 +0000 (+0200)
Subject: net/mlx5: remove inline Tx support
X-Git-Tag: spdx-start~6402
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=0431c40f47021fefd6b2064be1c02596e07f858e;p=dpdk.git

net/mlx5: remove inline Tx support

Inline TX will be fully managed by the PMD after Verbs is bypassed in the
data path. Remove the current code until then.

Signed-off-by: Nelio Laranjeiro
Signed-off-by: Adrien Mazarguil
---
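The removed fast path, for context: mlx5_tx_burst() picked between two Verbs
send callbacks per packet. A frame no longer than txq->max_inline was copied
straight into the send WQE through send_pending_inline(), skipping the
memory-region (lkey) lookup entirely; larger frames were posted by address
and lkey through send_pending(). The sketch below restates that decision in
standalone form; the struct and the callback signatures are illustrative
stand-ins, not the real Verbs interfaces.

#include <stdint.h>

/*
 * Standalone sketch of the decision being removed. Types, names and
 * callback signatures are stand-ins; only the control flow mirrors
 * the deleted hunk in mlx5_rxtx.c.
 */
struct txq_sketch {
	uint32_t max_inline; /* inline threshold from QP caps at setup */
	void *qp;            /* queue pair handle */
	int (*send_pending_inline)(void *qp, void *addr,
				   uint32_t length, uint32_t flags);
	int (*send_pending)(void *qp, uintptr_t addr, uint32_t length,
			    uint32_t lkey, uint32_t flags);
};

static int
tx_one_sketch(struct txq_sketch *txq, uintptr_t addr, uint32_t length,
	      uint32_t lkey, uint32_t send_flags)
{
	if (length <= txq->max_inline)
		/* Inline: the payload is copied into the WQE itself. */
		return txq->send_pending_inline(txq->qp, (void *)addr,
						length, send_flags);
	/* Gather: post the buffer address with its Memory Region key. */
	return txq->send_pending(txq->qp, addr, length, lkey, send_flags);
}

The threshold itself was not the raw MLX5_PMD_MAX_INLINE value: as the
removed comment in txq_setup() notes, ibv_create_qp() updates
cap.max_inline_data to what the device actually supports, hence the
tmpl.max_inline assignment deleted below.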
diff --git a/config/common_base b/config/common_base
index 6f283ac691..cfee8257e5 100644
--- a/config/common_base
+++ b/config/common_base
@@ -207,7 +207,6 @@ CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
 #
 CONFIG_RTE_LIBRTE_MLX5_PMD=n
 CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
-CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE=0
 CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8
 
 #
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 84c35a0541..77fa957392 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -114,16 +114,6 @@ These options can be modified in the ``.config`` file.
   adds additional run-time checks and debugging messages at the cost of lower
   performance.
 
-- ``CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE`` (default **0**)
-
-  Amount of data to be inlined during TX operations. Improves latency.
-  Can improve PPS performance when PCI backpressure is detected and may be
-  useful for scenarios involving heavy traffic on many queues.
-
-  Since the additional software logic necessary to handle this mode can
-  lower performance when there is no backpressure, it is not enabled by
-  default.
-
 - ``CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE`` (default **8**)
 
   Maximum number of cached memory pools (MPs) per TX queue. Each MP from
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 656a6e18d4..289c85e175 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -86,10 +86,6 @@ else
 CFLAGS += -DNDEBUG -UPEDANTIC
 endif
 
-ifdef CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE
-CFLAGS += -DMLX5_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE)
-endif
-
 ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE
 CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE)
 endif
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index da1c90e791..9a1983520d 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -54,11 +54,6 @@
 /* RSS Indirection table size. */
 #define RSS_INDIRECTION_TABLE_SIZE 256
 
-/* Maximum size for inline data. */
-#ifndef MLX5_PMD_MAX_INLINE
-#define MLX5_PMD_MAX_INLINE 0
-#endif
-
 /*
  * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
  * from which buffers are to be transmitted will have to be mapped by this
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 9f50e7443b..7480a33100 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -329,58 +329,33 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			rte_prefetch0((volatile void *)
 				      (uintptr_t)buf_next_addr);
 		}
-		/* Put packet into send queue. */
-#if MLX5_PMD_MAX_INLINE > 0
-		if (length <= txq->max_inline) {
-#ifdef HAVE_VERBS_VLAN_INSERTION
-			if (insert_vlan)
-				err = txq->send_pending_inline_vlan
-					(txq->qp,
-					 (void *)addr,
-					 length,
-					 send_flags,
-					 &buf->vlan_tci);
-			else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
-				err = txq->send_pending_inline
-					(txq->qp,
-					 (void *)addr,
-					 length,
-					 send_flags);
-		} else
-#endif
-		{
-			/*
-			 * Retrieve Memory Region key for this
-			 * memory pool.
-			 */
-			lkey = txq_mp2mr(txq, txq_mb2mp(buf));
-			if (unlikely(lkey == (uint32_t)-1)) {
-				/* MR does not exist. */
-				DEBUG("%p: unable to get MP <-> MR"
-				      " association", (void *)txq);
-				/* Clean up TX element. */
-				elt->buf = NULL;
-				goto stop;
-			}
+		/* Retrieve Memory Region key for this memory pool. */
+		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+		if (unlikely(lkey == (uint32_t)-1)) {
+			/* MR does not exist. */
+			DEBUG("%p: unable to get MP <-> MR"
+			      " association", (void *)txq);
+			/* Clean up TX element. */
+			elt->buf = NULL;
+			goto stop;
+		}
 #ifdef HAVE_VERBS_VLAN_INSERTION
-			if (insert_vlan)
-				err = txq->send_pending_vlan
-					(txq->qp,
-					 addr,
-					 length,
-					 lkey,
-					 send_flags,
-					 &buf->vlan_tci);
-			else
+		if (insert_vlan)
+			err = txq->send_pending_vlan
+				(txq->qp,
+				 addr,
+				 length,
+				 lkey,
+				 send_flags,
+				 &buf->vlan_tci);
+		else
 #endif /* HAVE_VERBS_VLAN_INSERTION */
-			err = txq->send_pending
-				(txq->qp,
-				 addr,
-				 length,
-				 lkey,
-				 send_flags);
-		}
+			err = txq->send_pending
+				(txq->qp,
+				 addr,
+				 length,
+				 lkey,
+				 send_flags);
 		if (unlikely(err))
 			goto stop;
 #ifdef MLX5_PMD_SOFT_COUNTERS
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 2e1f83bb13..3a353b0f77 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -239,20 +239,11 @@ struct txq {
 	int (*send_pending)();
 #ifdef HAVE_VERBS_VLAN_INSERTION
 	int (*send_pending_vlan)();
-#endif
-#if MLX5_PMD_MAX_INLINE > 0
-	int (*send_pending_inline)();
-#ifdef HAVE_VERBS_VLAN_INSERTION
-	int (*send_pending_inline_vlan)();
-#endif
 #endif
 	int (*send_flush)(struct ibv_qp *qp);
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_qp *qp; /* Queue Pair. */
 	struct txq_elt (*elts)[]; /* TX elements. */
-#if MLX5_PMD_MAX_INLINE > 0
-	uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */
-#endif
 	unsigned int elts_n; /* (*elts)[] length. */
 	unsigned int elts_head; /* Current index in (*elts)[]. */
 	unsigned int elts_tail; /* First element awaiting completion. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 59974c5857..946b7e308b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -173,9 +173,6 @@ txq_cleanup(struct txq *txq)
 	DEBUG("cleaning up %p", (void *)txq);
 	txq_free_elts(txq);
 	txq->poll_cnt = NULL;
-#if MLX5_PMD_MAX_INLINE > 0
-	txq->send_pending_inline = NULL;
-#endif
 	txq->send_flush = NULL;
 	if (txq->if_qp != NULL) {
 		assert(txq->priv != NULL);
@@ -282,7 +279,8 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
 		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
 		.res_domain = tmpl.rd,
 	};
-	tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+	tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0,
+				    &attr.cq);
 	if (tmpl.cq == NULL) {
 		ret = ENOMEM;
 		ERROR("%p: CQ creation failure: %s",
@@ -305,9 +303,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
 				  desc),
 			/* Max number of scatter/gather elements in a WR. */
 			.max_send_sge = 1,
-#if MLX5_PMD_MAX_INLINE > 0
-			.max_inline_data = MLX5_PMD_MAX_INLINE,
-#endif
 		},
 		.qp_type = IBV_QPT_RAW_PACKET,
 		/* Do *NOT* enable this, completions events are managed per
@@ -325,10 +320,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
-#if MLX5_PMD_MAX_INLINE > 0
-	/* ibv_create_qp() updates this value. */
-	tmpl.max_inline = attr.init.cap.max_inline_data;
-#endif
 	attr.mod = (struct ibv_exp_qp_attr){
 		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
@@ -403,12 +394,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
 	txq_cleanup(txq);
 	*txq = tmpl;
 	txq->poll_cnt = txq->if_cq->poll_cnt;
-#if MLX5_PMD_MAX_INLINE > 0
-	txq->send_pending_inline = txq->if_qp->send_pending_inline;
-#ifdef HAVE_VERBS_VLAN_INSERTION
-	txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan;
-#endif
-#endif
 	txq->send_pending = txq->if_qp->send_pending;
 #ifdef HAVE_VERBS_VLAN_INSERTION
 	txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
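
One closing observation: every hunk above deletes a branch of a single
compile-time gate. MLX5_PMD_MAX_INLINE defaults to 0 in mlx5_defs.h, so each
"#if MLX5_PMD_MAX_INLINE > 0" region (the struct fields, the callback wiring
in txq_setup(), the fast path in mlx5_tx_burst()) was already compiled out
unless CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE was set at build time. A reduced,
standalone illustration of the pattern, with FEATURE_MAX_INLINE as a
hypothetical stand-in for the macro:

#include <stdint.h>
#include <stdio.h>

/*
 * FEATURE_MAX_INLINE stands in for MLX5_PMD_MAX_INLINE and would normally
 * come from the build system, e.g. CFLAGS += -DFEATURE_MAX_INLINE=128.
 */
#ifndef FEATURE_MAX_INLINE
#define FEATURE_MAX_INLINE 0 /* gate closed by default */
#endif

struct queue {
	int id;
#if FEATURE_MAX_INLINE > 0
	uint32_t max_inline; /* field exists only when the gate is open */
#endif
};

int
main(void)
{
	struct queue q = { .id = 0 };

#if FEATURE_MAX_INLINE > 0
	q.max_inline = FEATURE_MAX_INLINE;
	printf("queue %d: inline TX up to %u bytes\n",
	       q.id, (unsigned int)q.max_inline);
#else
	printf("queue %d: inline TX compiled out\n", q.id);
#endif
	return 0;
}

Built as is, the gate stays closed and the field does not exist; building
with -DFEATURE_MAX_INLINE=128 enables both sites at once. Dropping the gate
removes that dual-build maintenance burden until the PMD-managed replacement
lands.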