X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_tx.h;h=1a359193715f8c3428504b7284d80e31f5feb1b5;hb=89a4bcb1fc7cacc76349013217d496711241e0f3;hp=34843d473f3b0aa29174d0b27ef39650976cc372;hpb=256fb0e38a85314b0a1f12c4b5cd58880d5965e0;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 34843d473f..1a35919371 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -62,10 +62,15 @@ enum mlx5_txcmp_code {
 
 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
 
+#define MLX5_TXOFF_PRE_DECL(func) \
+uint16_t mlx5_tx_burst_##func(void *txq, \
+			      struct rte_mbuf **pkts, \
+			      uint16_t pkts_n)
+
 #define MLX5_TXOFF_DECL(func, olx) \
-static uint16_t mlx5_tx_burst_##func(void *txq, \
-		     struct rte_mbuf **pkts, \
-		     uint16_t pkts_n) \
+uint16_t mlx5_tx_burst_##func(void *txq, \
+		     struct rte_mbuf **pkts, \
+		     uint16_t pkts_n) \
 { \
 	return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
 				  pkts, pkts_n, (olx)); \
@@ -237,6 +242,60 @@ uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
 uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
 			       struct rte_mempool *mp);
 
+/* mlx5_tx_empw.c */
+
+MLX5_TXOFF_PRE_DECL(full_empw);
+MLX5_TXOFF_PRE_DECL(none_empw);
+MLX5_TXOFF_PRE_DECL(md_empw);
+MLX5_TXOFF_PRE_DECL(mt_empw);
+MLX5_TXOFF_PRE_DECL(mtsc_empw);
+MLX5_TXOFF_PRE_DECL(mti_empw);
+MLX5_TXOFF_PRE_DECL(mtv_empw);
+MLX5_TXOFF_PRE_DECL(mtiv_empw);
+MLX5_TXOFF_PRE_DECL(sc_empw);
+MLX5_TXOFF_PRE_DECL(sci_empw);
+MLX5_TXOFF_PRE_DECL(scv_empw);
+MLX5_TXOFF_PRE_DECL(sciv_empw);
+MLX5_TXOFF_PRE_DECL(i_empw);
+MLX5_TXOFF_PRE_DECL(v_empw);
+MLX5_TXOFF_PRE_DECL(iv_empw);
+
+/* mlx5_tx_nompw.c */
+
+MLX5_TXOFF_PRE_DECL(full);
+MLX5_TXOFF_PRE_DECL(none);
+MLX5_TXOFF_PRE_DECL(md);
+MLX5_TXOFF_PRE_DECL(mt);
+MLX5_TXOFF_PRE_DECL(mtsc);
+MLX5_TXOFF_PRE_DECL(mti);
+MLX5_TXOFF_PRE_DECL(mtv);
+MLX5_TXOFF_PRE_DECL(mtiv);
+MLX5_TXOFF_PRE_DECL(sc);
+MLX5_TXOFF_PRE_DECL(sci);
+MLX5_TXOFF_PRE_DECL(scv);
+MLX5_TXOFF_PRE_DECL(sciv);
+MLX5_TXOFF_PRE_DECL(i);
+MLX5_TXOFF_PRE_DECL(v);
+MLX5_TXOFF_PRE_DECL(iv);
+
+/* mlx5_tx_txpp.c */
+
+MLX5_TXOFF_PRE_DECL(full_ts_nompw);
+MLX5_TXOFF_PRE_DECL(full_ts_nompwi);
+MLX5_TXOFF_PRE_DECL(full_ts);
+MLX5_TXOFF_PRE_DECL(full_ts_noi);
+MLX5_TXOFF_PRE_DECL(none_ts);
+MLX5_TXOFF_PRE_DECL(mdi_ts);
+MLX5_TXOFF_PRE_DECL(mti_ts);
+MLX5_TXOFF_PRE_DECL(mtiv_ts);
+
+/* mlx5_tx_mpw.c */
+
+MLX5_TXOFF_PRE_DECL(none_mpw);
+MLX5_TXOFF_PRE_DECL(mci_mpw);
+MLX5_TXOFF_PRE_DECL(mc_mpw);
+MLX5_TXOFF_PRE_DECL(i_mpw);
+
 static __rte_always_inline uint64_t *
 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
 {
@@ -1279,7 +1338,8 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
		 * Copying may be interrupted inside the routine
		 * if run into no inline hint flag.
		 */
-		copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
+		copy = tso ? inlen : txq->inlen_mode;
+		copy = tlen >= copy ? 0 : (copy - tlen);
		copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
		tlen += copy;
		if (likely(inlen <= tlen) || copy < part) {
@@ -1981,6 +2041,8 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
		unsigned int nxlen;
		uintptr_t start;
 
+		mbuf = loc->mbuf;
+		nxlen = rte_pktmbuf_data_len(mbuf);
		/*
		 * Packet length exceeds the allowed inline data length,
		 * check whether the minimal inlining is required.
@@ -1990,28 +2052,23 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
					     MLX5_ESEG_MIN_INLINE_SIZE);
			MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
			inlen = txq->inlen_mode;
-		} else {
-			if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
-			    !vlan || txq->vlan_en) {
-				/*
-				 * VLAN insertion will be done inside by HW.
-				 * It is not utmost effective - VLAN flag is
-				 * checked twice, but we should proceed the
-				 * inlining length correctly and take into
-				 * account the VLAN header being inserted.
-				 */
-				return mlx5_tx_packet_multi_send
-							(txq, loc, olx);
-			}
+		} else if (vlan && !txq->vlan_en) {
+			/*
+			 * VLAN insertion is requested and hardware does not
+			 * support the offload, will do with software inline.
+			 */
			inlen = MLX5_ESEG_MIN_INLINE_SIZE;
+		} else if (mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
+			   nxlen > txq->inlen_send) {
+			return mlx5_tx_packet_multi_send(txq, loc, olx);
+		} else {
+			goto do_first;
		}
		/*
		 * Now we know the minimal amount of data is requested
		 * to inline. Check whether we should inline the buffers
		 * from the chain beginning to eliminate some mbufs.
		 */
-		mbuf = loc->mbuf;
-		nxlen = rte_pktmbuf_data_len(mbuf);
		if (unlikely(nxlen <= txq->inlen_send)) {
			/* We can inline first mbuf at least. */
			if (nxlen < inlen) {
@@ -2033,6 +2090,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
			goto do_align;
		}
	}
+do_first:
	do {
		inlen = nxlen;
		mbuf = NEXT(mbuf);
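
For reference, the macro pair introduced at the top of the patch is meant to be used as a prototype/definition split: MLX5_TXOFF_PRE_DECL() emits an extern prototype in mlx5_tx.h, while MLX5_TXOFF_DECL(), with the `static` qualifier dropped, emits the matching definition in one of the per-flavor source files named in the new comments (mlx5_tx_empw.c, mlx5_tx_nompw.c, mlx5_tx_txpp.c, mlx5_tx_mpw.c). A rough, illustrative sketch of what the pair expands to for a single flavor is shown below; only the macro definitions come from the patch, and OLX stands in for the constant offload mask passed as the second MLX5_TXOFF_DECL() argument.

/* Prototype generated in mlx5_tx.h by "MLX5_TXOFF_PRE_DECL(full_empw);" */
uint16_t mlx5_tx_burst_full_empw(void *txq,
				 struct rte_mbuf **pkts,
				 uint16_t pkts_n);

/*
 * Definition generated in the corresponding per-flavor .c file by
 * MLX5_TXOFF_DECL(full_empw, OLX); it forwards to the inline template
 * with the flavor's compile-time offload mask.
 */
uint16_t mlx5_tx_burst_full_empw(void *txq,
				 struct rte_mbuf **pkts,
				 uint16_t pkts_n)
{
	return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq,
				  pkts, pkts_n, (OLX));
}

Because the generated functions are no longer static, the definitions can live in separate compilation units while other parts of the driver keep referring to them through the mlx5_tx_burst_<flavor> names declared here.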