From f7a66f9365fe442d743639d50638a8439e712b96 Mon Sep 17 00:00:00 2001
From: Igor Romanov
Date: Tue, 2 Apr 2019 10:28:40 +0100
Subject: [PATCH] net/sfc: introduce descriptor space check in Tx prepare

Add descriptor space check to Tx prepare function to inform a caller
that a packet that needs more than maximum Tx descriptors of a queue
can not be sent.

Signed-off-by: Igor Romanov
Signed-off-by: Andrew Rybchenko
---
 drivers/net/sfc/sfc_dp_tx.h   | 31 ++++++++++++++++++++++++++++++-
 drivers/net/sfc/sfc_ef10_tx.c |  4 +++-
 drivers/net/sfc/sfc_tx.c      |  9 ++++++++-
 3 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index c42d0d01fe..ebc941857b 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -196,8 +196,13 @@ const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
 
 static inline int
 sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
-		      uint32_t tso_tcp_header_offset_limit)
+		      uint32_t tso_tcp_header_offset_limit,
+		      unsigned int max_fill_level,
+		      unsigned int nb_tso_descs,
+		      unsigned int nb_vlan_descs)
 {
+	unsigned int descs_required = m->nb_segs;
+
 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
 	int ret;
 
@@ -214,11 +219,35 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
 
 	if (m->ol_flags & PKT_TX_TCP_SEG) {
 		unsigned int tcph_off = m->l2_len + m->l3_len;
+		unsigned int header_len = tcph_off + m->l4_len;
 
 		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
 			return EINVAL;
+
+		descs_required += nb_tso_descs;
+
+		/*
+		 * Extra descriptor that is required when a packet header
+		 * is separated from remaining content of the first segment.
+		 */
+		if (rte_pktmbuf_data_len(m) > header_len)
+			descs_required++;
 	}
 
+	/*
+	 * The number of VLAN descriptors is added regardless of requested
+	 * VLAN offload since VLAN is sticky and sending packet without VLAN
+	 * insertion may require VLAN descriptor to reset the sticky to 0.
+	 */
+	descs_required += nb_vlan_descs;
+
+	/*
+	 * Max fill level must be sufficient to hold all required descriptors
+	 * to send the packet entirely.
+	 */
+	if (descs_required > max_fill_level)
+		return ENOBUFS;
+
 	return 0;
 }
 
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 3d6ba4292b..e7ab993ddf 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -349,7 +349,9 @@ sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 #endif
 		ret = sfc_dp_tx_prepare_pkt(m,
-				txq->tso_tcp_header_offset_limit);
+				txq->tso_tcp_header_offset_limit,
+				txq->max_fill_level,
+				SFC_EF10_TSO_OPT_DESCS_NUM, 0);
 		if (unlikely(ret != 0)) {
 			rte_errno = ret;
 			break;
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index e128bff908..4037802e6a 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -709,8 +709,15 @@ sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (i = 0; i < nb_pkts; i++) {
 		int ret;
 
+		/*
+		 * EFX Tx datapath may require extra VLAN descriptor if VLAN
+		 * insertion offload is requested regardless the offload
+		 * requested/supported.
+		 */
 		ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
-				encp->enc_tx_tso_tcp_header_offset_limit);
+				encp->enc_tx_tso_tcp_header_offset_limit,
+				txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS,
+				1);
 		if (unlikely(ret != 0)) {
 			rte_errno = ret;
 			break;
-- 
2.20.1