+/** Get Tx datapath ops by the datapath TxQ handle */
+const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+
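+/** Get the union of device-level and per-queue Tx offload capabilities */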
+static inline uint64_t
+sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
+{
+ return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
+}
+
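+/**
+ * Count the extra mbuf segments which are fully occupied by the packet
+ * header. On return, *m_seg points to the segment where the header ends
+ * and *header_len_remaining holds the number of header bytes left in it.
+ */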
+static inline unsigned int
+sfc_dp_tx_pkt_extra_hdr_segs(struct rte_mbuf **m_seg,
+ unsigned int *header_len_remaining)
+{
+ unsigned int nb_extra_header_segs = 0;
+
+ while (rte_pktmbuf_data_len(*m_seg) < *header_len_remaining) {
+ *header_len_remaining -= rte_pktmbuf_data_len(*m_seg);
+ *m_seg = (*m_seg)->next;
+ ++nb_extra_header_segs;
+ }
+
+ return nb_extra_header_segs;
+}
+
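+/**
+ * Check that an mbuf may be sent by the Tx datapath and estimate the
+ * number of Tx descriptors required to send it.
+ *
+ * @return 0 on success or a positive errno value on failure.
+ */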
+static inline int
+sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
+ unsigned int max_nb_header_segs,
+ unsigned int tso_bounce_buffer_len,
+ uint32_t tso_tcp_header_offset_limit,
+ unsigned int max_fill_level,
+ unsigned int nb_tso_descs,
+ unsigned int nb_vlan_descs)
+{
+ unsigned int descs_required = m->nb_segs;
+ unsigned int tcph_off = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ m->outer_l2_len + m->outer_l3_len : 0) +
+ m->l2_len + m->l3_len;
+ unsigned int header_len = tcph_off + m->l4_len;
+ unsigned int header_len_remaining = header_len;
+ unsigned int nb_header_segs = 1;
+ struct rte_mbuf *m_seg = m;
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+ int ret;
+
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+		/*
+		 * A negative error code is returned by
+		 * rte_validate_tx_offload(), but positive error codes
+		 * are used inside the net/sfc PMD.
+		 */
+ SFC_ASSERT(ret < 0);
+ return -ret;
+ }
+#endif
+
+ if (max_nb_header_segs != 0) {
+ /* There is a limit on the number of header segments. */
+
+ nb_header_segs +=
+ sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+ &header_len_remaining);
+
+ if (unlikely(nb_header_segs > max_nb_header_segs)) {
+ /*
+ * The number of header segments is too large.
+ *
+			 * If TSO is requested and the datapath supports
+			 * linearisation of TSO headers, allow the packet
+			 * to proceed with the additional checks below.
+			 * Otherwise, return an error.
+ */
+ if ((m->ol_flags & PKT_TX_TCP_SEG) == 0 ||
+ tso_bounce_buffer_len == 0)
+ return EINVAL;
+ }
+ }
+
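+	/* TSO-specific checks and descriptor accounting */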
+ if (m->ol_flags & PKT_TX_TCP_SEG) {
+ switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+ case 0:
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ /* FALLTHROUGH */
+ case PKT_TX_TUNNEL_GENEVE:
+ if (!(m->ol_flags &
+ (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+ return EINVAL;
+ }
+
+ if (unlikely(tcph_off > tso_tcp_header_offset_limit))
+ return EINVAL;
+
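+		/* Account for the datapath-specific TSO descriptors */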
+ descs_required += nb_tso_descs;
+
+		/*
+		 * If the header segments have already been counted above,
+		 * nothing is done here since the remaining header length
+		 * is smaller than the current segment size.
+		 */
+ nb_header_segs +=
+ sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+ &header_len_remaining);
+
+		/*
+		 * An extra descriptor is required when (a part of) the
+		 * payload shares a segment with (a part of) the header.
+		 */
+ if (rte_pktmbuf_data_len(m_seg) > header_len_remaining)
+ descs_required++;
+
+ if (tso_bounce_buffer_len != 0) {
+ if (nb_header_segs > 1 &&
+ unlikely(header_len > tso_bounce_buffer_len)) {
+				/*
+				 * Header linearisation is required and
+				 * the header is too big to be linearised.
+				 */
+ return EINVAL;
+ }
+ }
+ }
+
+	/*
+	 * The number of VLAN descriptors is added regardless of the
+	 * requested VLAN offload since VLAN tagging is sticky and sending
+	 * a packet without VLAN insertion may require a VLAN descriptor
+	 * to reset the sticky tag to 0.
+	 */
+ descs_required += nb_vlan_descs;
+
+	/*
+	 * The maximum fill level must be sufficient to hold all the
+	 * descriptors required to send the packet entirely.
+	 */
+ if (descs_required > max_fill_level)
+ return ENOBUFS;
+
+ return 0;
+}
+