+/*
+ * Try to make DMA descriptor space available by reaping completed
+ * Tx descriptors.
+ *
+ * The reap is attempted at most once per packet: if *reap_done is
+ * already set, nothing is done and false is returned. Otherwise any
+ * descriptors added so far are pushed to the NIC first (so they can
+ * complete and be reaped), the queue is reaped, *reap_done is set and
+ * *dma_desc_space is recalculated from the updated completed count.
+ *
+ * Returns true if the recalculated space is enough for needed_desc.
+ */
+static bool
+sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
+ unsigned int needed_desc, unsigned int *dma_desc_space,
+ bool *reap_done)
+{
+ if (*reap_done)
+ return false;
+
+ /* Push pending descriptors before reaping so they can complete */
+ if (added != txq->added) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+ sfc_ef10_tx_reap(txq);
+ *reap_done = true;
+
+ /*
+ * Recalculate DMA descriptor space since Tx reap may change
+ * the number of completed descriptors
+ */
+ *dma_desc_space = txq->max_fill_level -
+ (added - txq->completed);
+
+ return (needed_desc <= *dma_desc_space);
+}
+
+/*
+ * Build the EF10 descriptor chain for one TSO packet.
+ *
+ * m_seg is the first mbuf segment of the packet. On success the TSO
+ * option descriptors, the header DMA descriptor and the payload DMA
+ * descriptors are written to the hardware ring, mbufs are associated
+ * with sw_ring entries for later reaping, and *added is advanced past
+ * all descriptors written.
+ *
+ * If the packet header is fragmented across segments it is linearised
+ * into the queue's TSO header buffer and the fully-consumed header
+ * segments are freed before returning success.
+ *
+ * Returns 0 on success or a positive errno value on failure:
+ *  - EMSGSIZE: the packet can never be sent on this queue (header too
+ *    long, header-only packet, or it cannot fit even a fully empty
+ *    ring); the caller should drop it.
+ *  - ENOSPC: not enough descriptor space right now, but a future reap
+ *    may make room; the packet may be retried later.
+ *  - EINVAL: neither PKT_TX_IPV4 nor PKT_TX_IPV6 is set in ol_flags.
+ */
+static int
+sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
+ unsigned int *added, unsigned int *dma_desc_space,
+ bool *reap_done)
+{
+ size_t iph_off = m_seg->l2_len;
+ size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
+ size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
+ /* Offset of the payload in the last segment that contains the header */
+ size_t in_off = 0;
+ const struct tcp_hdr *th;
+ uint16_t packet_id;
+ uint32_t sent_seq;
+ uint8_t *hdr_addr;
+ rte_iova_t hdr_iova;
+ struct rte_mbuf *first_m_seg = m_seg;
+ unsigned int pkt_start = *added;
+ unsigned int needed_desc;
+ struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
+ bool eop;
+
+ /* Both checks may be done, so use bit OR to have only one branching */
+ if (unlikely((header_len > SFC_TSOH_STD_LEN) |
+ (tcph_off > txq->tso_tcp_header_offset_limit)))
+ return EMSGSIZE;
+
+ /*
+ * Preliminary estimation of required DMA descriptors, including extra
+ * descriptor for TSO header that is needed when the header is
+ * separated from payload in one segment. It does not include
+ * extra descriptors that may appear when a big segment is split across
+ * several descriptors.
+ */
+ needed_desc = m_seg->nb_segs +
+ (unsigned int)SFC_TSO_OPT_DESCS_NUM +
+ (unsigned int)SFC_TSO_HDR_DESCS_NUM;
+
+ if (needed_desc > *dma_desc_space &&
+ !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
+ dma_desc_space, reap_done)) {
+ /*
+ * If a future Tx reap may increase available DMA descriptor
+ * space, do not try to send the packet.
+ */
+ if (txq->completed != pkt_start)
+ return ENOSPC;
+ /*
+ * Do not allow to send packet if the maximum DMA
+ * descriptor space is not sufficient to hold TSO
+ * descriptors, header descriptor and at least 1
+ * segment descriptor.
+ */
+ if (*dma_desc_space < SFC_TSO_OPT_DESCS_NUM +
+ SFC_TSO_HDR_DESCS_NUM + 1)
+ return EMSGSIZE;
+ }
+
+ /* Check if the header is not fragmented */
+ if (rte_pktmbuf_data_len(m_seg) >= header_len) {
+ /* Header is contiguous; DMA it directly from the mbuf */
+ hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
+ hdr_iova = rte_mbuf_data_iova(m_seg);
+ if (rte_pktmbuf_data_len(m_seg) == header_len) {
+ /* Cannot send a packet that consists only of header */
+ if (unlikely(m_seg->next == NULL))
+ return EMSGSIZE;
+ /*
+ * Associate header mbuf with header descriptor
+ * which is located after TSO descriptors.
+ */
+ txq->sw_ring[(pkt_start + SFC_TSO_OPT_DESCS_NUM) &
+ txq->ptr_mask].mbuf = m_seg;
+ m_seg = m_seg->next;
+ in_off = 0;
+
+ /*
+ * If there is no payload offset (payload starts at the
+ * beginning of a segment) then an extra descriptor for
+ * separated header is not needed.
+ */
+ needed_desc--;
+ } else {
+ /* Payload shares the first segment with the header */
+ in_off = header_len;
+ }
+ } else {
+ /*
+ * Header spans several segments: copy it into the per-entry
+ * slot of the queue's TSO header buffer and DMA it from there.
+ */
+ unsigned int copied_segs;
+ unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
+ SFC_TSOH_STD_LEN;
+
+ hdr_addr = txq->tsoh + hdr_addr_off;
+ hdr_iova = txq->tsoh_iova + hdr_addr_off;
+ copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
+ &m_seg, &in_off);
+
+ /* Cannot send a packet that consists only of header */
+ if (unlikely(m_seg == NULL))
+ return EMSGSIZE;
+
+ m_seg_to_free_up_to = m_seg;
+ /*
+ * Reduce the number of needed descriptors by the number of
+ * segments that entirely consist of header data.
+ */
+ needed_desc -= copied_segs;
+
+ /* Extra descriptor for separated header is not needed */
+ if (in_off == 0)
+ needed_desc--;
+ }
+
+ /*
+ * Extract IP ID (IPv4 only) and TCP sequence number for the TSO
+ * option descriptor. rte_memcpy is used since the header fields
+ * may be unaligned in the buffer.
+ */
+ switch (first_m_seg->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) {
+ case PKT_TX_IPV4: {
+ const struct ipv4_hdr *iphe4;
+
+ iphe4 = (const struct ipv4_hdr *)(hdr_addr + iph_off);
+ rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
+ packet_id = rte_be_to_cpu_16(packet_id);
+ break;
+ }
+ case PKT_TX_IPV6:
+ packet_id = 0;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
+ rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
+ sent_seq = rte_be_to_cpu_32(sent_seq);
+
+ /* TSO option descriptors, then the header DMA descriptor */
+ sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
+ first_m_seg->tso_segsz);
+ (*added) += SFC_TSO_OPT_DESCS_NUM;
+
+ sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
+ &txq->txq_hw_ring[(*added) & txq->ptr_mask]);
+ (*added)++;
+
+ /* Outer loop walks mbuf segments, inner loop splits big segments */
+ do {
+ rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
+ unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+ unsigned int id;
+
+ /* Skip the header bytes still present in this segment */
+ next_frag += in_off;
+ seg_len -= in_off;
+ in_off = 0;
+
+ do {
+ rte_iova_t frag_addr = next_frag;
+ size_t frag_len;
+
+ frag_len = RTE_MIN(seg_len,
+ SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ next_frag += frag_len;
+ seg_len -= frag_len;
+
+ eop = (seg_len == 0 && m_seg->next == NULL);
+
+ id = (*added) & txq->ptr_mask;
+ (*added)++;
+
+ /*
+ * Initially we assume that one DMA descriptor is needed
+ * for every segment. When the segment is split across
+ * several DMA descriptors, increase the estimation.
+ */
+ needed_desc += (seg_len != 0);
+
+ /*
+ * When no more descriptors can be added, but not all
+ * segments are processed.
+ */
+ if (*added - pkt_start == *dma_desc_space &&
+ !eop &&
+ !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
+ dma_desc_space, reap_done)) {
+ struct rte_mbuf *m;
+ struct rte_mbuf *m_next;
+
+ if (txq->completed != pkt_start) {
+ unsigned int i;
+
+ /*
+ * Reset mbuf associations with added
+ * descriptors.
+ */
+ for (i = pkt_start; i != *added; i++) {
+ id = i & txq->ptr_mask;
+ txq->sw_ring[id].mbuf = NULL;
+ }
+ return ENOSPC;
+ }
+
+ /*
+ * The whole ring is available but still too
+ * small: truncate the packet. Free the
+ * segments that cannot be sent.
+ */
+ for (m = m_seg->next; m != NULL; m = m_next) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ }
+ eop = true;
+ /* Ignore the rest of the segment */
+ seg_len = 0;
+ }
+
+ sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
+ eop, &txq->txq_hw_ring[id]);
+
+ } while (seg_len != 0);
+
+ /*
+ * Associate the segment's mbuf with its last descriptor
+ * (id survives from the final inner-loop iteration) so it
+ * is freed only when all its fragments complete.
+ */
+ txq->sw_ring[id].mbuf = m_seg;
+
+ m_seg = m_seg->next;
+ } while (!eop);
+
+ /*
+ * Free segments which content was entirely copied to the TSO header
+ * memory space of Tx queue
+ */
+ for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
+ struct rte_mbuf *seg_to_free = m_seg;
+
+ m_seg = m_seg->next;
+ rte_pktmbuf_free_seg(seg_to_free);
+ }
+
+ return 0;
+}
+