Tx offload checks should be done in Tx prepare rather than in Tx burst, so move the TSO header offset check there.
Signed-off-by: Igor Romanov <igor.romanov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
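
For context, a minimal application-side sketch (not part of this patch;
send_burst() and its parameters are hypothetical) of why the checks belong
in Tx prepare: rte_eth_tx_prepare() runs before rte_eth_tx_burst() and
reports the first bad packet through its return value and rte_errno.

#include <rte_ethdev.h>
#include <rte_errno.h>

static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Run the PMD offload checks, e.g. the TSO header offset
	 * check introduced below. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep < nb_pkts) {
		/* pkts[nb_prep] failed validation; rte_errno holds
		 * a positive errno value. */
	}

	/* Transmit only the packets that passed preparation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}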
const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
static inline int
-sfc_dp_tx_prepare_pkt(struct rte_mbuf *m)
+sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
+ uint32_t tso_tcp_header_offset_limit)
{
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	int ret;

	ret = rte_validate_tx_offload(m);
	if (ret != 0) {
		/*
		 * Negative error code is returned by
		 * rte_validate_tx_offload(), but positive values
		 * are used inside the driver.
		 */
		SFC_ASSERT(ret < 0);
		return -ret;
	}
-#else
- RTE_SET_USED(m);
#endif
+ if (m->ol_flags & PKT_TX_TCP_SEG) {
+ unsigned int tcph_off = m->l2_len + m->l3_len;
+
+ if (unlikely(tcph_off > tso_tcp_header_offset_limit))
+ return EINVAL;
+ }
+
return 0;
}
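
As a worked example (hypothetical helper, not part of this patch): for a
plain Ethernet/IPv4/TCP TSO packet the TCP header offset is 14 + 20 = 34
bytes, comfortably below the 208-byte EF10 limit quoted in the comment
removed further down, so an offending offset is now rejected in prepare
with EINVAL instead of with EMSGSIZE deep in the Tx burst datapath.

static int
sfc_tso_offset_example(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_TCP_SEG;
	m->l2_len = 14;		/* Ethernet header */
	m->l3_len = 20;		/* IPv4 header, no options */
	m->l4_len = 20;		/* TCP header, no options */

	/* tcph_off = 14 + 20 = 34 <= 208, so this returns 0. */
	return sfc_dp_tx_prepare_pkt(m, 208);
}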
}
static uint16_t
-sfc_ef10_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
uint16_t i;
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		int ret;
}
}
#endif
- ret = sfc_dp_tx_prepare_pkt(m);
+ ret = sfc_dp_tx_prepare_pkt(m,
+ txq->tso_tcp_header_offset_limit);
if (unlikely(ret != 0)) {
rte_errno = ret;
break;
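
Note the error code change: an offset violation is now reported as EINVAL
through rte_errno by the prepare callback, replacing the EMSGSIZE return
removed from the Tx burst datapath below, so applications see the failure
via the rte_eth_tx_prepare() return value as sketched above.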
struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
bool eop;
- if (unlikely(tcph_off > txq->tso_tcp_header_offset_limit))
- return EMSGSIZE;
-
/*
* Preliminary estimation of required DMA descriptors, including extra
* descriptor for TSO header that is needed when the header is
size_t nh_off = m->l2_len; /* IP header offset */
size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
size_t header_len = m->l2_len + m->l3_len + m->l4_len;
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
idx += SFC_EF10_TSO_OPT_DESCS_NUM;
- /*
- * The TCP header must start at most 208 bytes into the frame.
- * If it starts later than this then the NIC won't realise
- * it's a TCP packet and TSO edits won't be applied
- */
- if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
- return EMSGSIZE;
-
header_paddr = rte_pktmbuf_iova(m);
/*
}
static uint16_t
-sfc_efx_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct sfc_dp_txq *dp_txq = tx_queue;
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
uint16_t i;
for (i = 0; i < nb_pkts; i++) {
int ret;
- ret = sfc_dp_tx_prepare_pkt(tx_pkts[i]);
+ ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
+ encp->enc_tx_tso_tcp_header_offset_limit);
if (unlikely(ret != 0)) {
rte_errno = ret;
break;
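
A side effect of the move: efx_nic_cfg_get() is looked up once per
prepared burst here instead of once per TSO packet in sfc_efx_tso_do()
(see the removal above), taking an adapter configuration lookup out of
the per-packet path.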
*/
if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
&pkt_descs, &pkt_len) != 0) {
- /* We may have reached this place for
- * one of the following reasons:
- *
- * 1) Packet header linearization is needed
- * and the header length is greater
- * than SFC_TSOH_STD_LEN
- * 2) TCP header starts at more then
- * 208 bytes into the frame
+ /* We may have reached this place if packet
+ * header linearization is needed but the
+ * header length is greater than
+ * SFC_TSOH_STD_LEN
*
* We will deceive RTE saying that we have sent
* the packet, but we will actually drop it.