+static bool
+sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
+ unsigned int needed_desc, unsigned int *dma_desc_space,
+ bool *reap_done)
+{
+ if (*reap_done)
+ return false;
+
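+ /*
+ * If descriptors have been added since the last doorbell, push
+ * them to HW before reaping.
+ */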
+ if (added != txq->added) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+ sfc_ef10_tx_reap(txq);
+ *reap_done = true;
+
+ /*
+ * Recalculate DMA descriptor space since Tx reap may change
+ * the number of completed descriptors
+ */
+ *dma_desc_space = txq->max_fill_level -
+ (added - txq->completed);
+
+ return (needed_desc <= *dma_desc_space);
+}
+
+static uint16_t
+sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *m = tx_pkts[i];
+ int ret;
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+ /*
+ * In the non-TSO case, check that the packet's segments do not
+ * exceed the size limit. Perform the check in debug mode only,
+ * since an MTU above 9K is not supported while the limit here is
+ * 16K-1.
+ */
+ if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ struct rte_mbuf *m_seg;
+
+ for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
+ if (m_seg->data_len >
+ SFC_EF10_TX_DMA_DESC_LEN_MAX) {
+ rte_errno = EINVAL;
+ break;
+ }
+ }
+ }
+#endif
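+ /*
+ * Common Tx prepare checks (TSO header offset limit and
+ * descriptor space requirements).
+ */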
+ ret = sfc_dp_tx_prepare_pkt(m,
+ txq->tso_tcp_header_offset_limit,
+ txq->max_fill_level,
+ SFC_EF10_TSO_OPT_DESCS_NUM, 0);
+ if (unlikely(ret != 0)) {
+ rte_errno = ret;
+ break;
+ }
+ }
+
+ return i;
+}
+
+static int
+sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
+ unsigned int *added, unsigned int *dma_desc_space,
+ bool *reap_done)
+{
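+ /*
+ * IP and TCP header offsets and total header length; outer headers
+ * are included for tunnel packets.
+ */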
+ size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
+ m_seg->l2_len;
+ size_t tcph_off = iph_off + m_seg->l3_len;
+ size_t header_len = tcph_off + m_seg->l4_len;
+ /* Offset of the payload in the last segment that contains the header */
+ size_t in_off = 0;
+ const struct rte_tcp_hdr *th;
+ uint16_t packet_id = 0;
+ uint16_t outer_packet_id = 0;
+ uint32_t sent_seq;
+ uint8_t *hdr_addr;
+ rte_iova_t hdr_iova;
+ struct rte_mbuf *first_m_seg = m_seg;
+ unsigned int pkt_start = *added;
+ unsigned int needed_desc;
+ struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
+ bool eop;
+
+ /*
+ * Preliminary estimate of the required DMA descriptors: one per mbuf
+ * segment plus the TSO option descriptors plus an extra descriptor
+ * for the TSO header, which is needed when the header has to be
+ * separated from payload residing in the same segment. The estimate
+ * does not include extra descriptors that may be needed when a big
+ * segment is split across several descriptors.
+ */
+ needed_desc = m_seg->nb_segs +
+ (unsigned int)SFC_EF10_TSO_OPT_DESCS_NUM +
+ (unsigned int)SFC_EF10_TSO_HDR_DESCS_NUM;
+
+ if (needed_desc > *dma_desc_space &&
+ !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
+ dma_desc_space, reap_done)) {
+ /*
+ * Leave the packet for a later attempt: descriptors are still in
+ * flight, so a future Tx reap may increase the available DMA
+ * descriptor space.
+ */
+ if (txq->completed != pkt_start)
+ return ENOSPC;
+ /*
+ * Do not allow sending the packet if even the maximum DMA
+ * descriptor space is not sufficient to hold the TSO option
+ * descriptors, the header descriptor and at least one segment
+ * descriptor.
+ */
+ if (*dma_desc_space < SFC_EF10_TSO_OPT_DESCS_NUM +
+ SFC_EF10_TSO_HDR_DESCS_NUM + 1)
+ return EMSGSIZE;
+ }
+
+ /* Check if the header is not fragmented */
+ if (rte_pktmbuf_data_len(m_seg) >= header_len) {
+ hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
+ hdr_iova = rte_mbuf_data_iova(m_seg);
+ if (rte_pktmbuf_data_len(m_seg) == header_len) {
+ /* Cannot send a packet that consists only of header */
+ if (unlikely(m_seg->next == NULL))
+ return EMSGSIZE;
+ /*
+ * Associate the header mbuf with the header descriptor,
+ * which is located after the TSO option descriptors.
+ */
+ txq->sw_ring[(pkt_start + SFC_EF10_TSO_OPT_DESCS_NUM) &
+ txq->ptr_mask].mbuf = m_seg;
+ m_seg = m_seg->next;
+ in_off = 0;
+
+ /*
+ * If there is no payload offset (payload starts at the
+ * beginning of a segment), an extra descriptor for the
+ * separated header is not needed.
+ */
+ needed_desc--;
+ } else {
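+ /* Payload starts in the same segment right after the header */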
+ in_off = header_len;
+ }
+ } else {
+ unsigned int copied_segs;
+ unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
+ SFC_TSOH_STD_LEN;
+
+ /*
+ * Discard the packet if header linearization is needed but
+ * the header is too big to fit into the TSO header buffer.
+ * The Tx prepare check is duplicated here to avoid memory
+ * corruption if Tx prepare is skipped.
+ */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
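+ /*
+ * Copy the fragmented header into the per-queue TSO header buffer;
+ * m_seg and in_off are updated to point at the first payload byte.
+ */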
+ hdr_addr = txq->tsoh + hdr_addr_off;
+ hdr_iova = txq->tsoh_iova + hdr_addr_off;
+ copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
+ &m_seg, &in_off);
+
+ /* Cannot send a packet that consists only of header */
+ if (unlikely(m_seg == NULL))
+ return EMSGSIZE;
+
+ m_seg_to_free_up_to = m_seg;
+ /*
+ * Reduce the number of needed descriptors by the number of
+ * segments that entirely consist of header data.
+ */
+ needed_desc -= copied_segs;
+
+ /* Extra descriptor for separated header is not needed */
+ if (in_off == 0)
+ needed_desc--;
+ }
+
+ /*
+ * Tx prepare has debug-only checks that offload flags are correctly
+ * filled in the TSO mbuf. Use zero IPID if there is no IPv4 flag.
+ * If the packet is in fact IPv4, HW will simply start from zero IPID.
+ */
+ if (first_m_seg->ol_flags & PKT_TX_IPV4)
+ packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
+
+ if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
+ outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
+ first_m_seg->outer_l2_len);
+
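+ /* Extract the TCP sequence number from the prepared header */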
+ th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off);
+ rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
+ sent_seq = rte_be_to_cpu_32(sent_seq);
+
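+ /*
+ * Write the TSO option descriptors followed by a DMA descriptor
+ * covering the packet header.
+ */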
+ sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
+ sent_seq, first_m_seg->tso_segsz);
+ (*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
+
+ sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
+ &txq->txq_hw_ring[(*added) & txq->ptr_mask]);
+ (*added)++;
+
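+ /*
+ * Fill in DMA descriptors for payload segments, splitting any
+ * segment that exceeds the maximum DMA descriptor length.
+ */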
+ do {
+ rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
+ unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+ unsigned int id;
+
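+ /* Skip bytes already consumed as header at the start of the segment */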
+ next_frag += in_off;
+ seg_len -= in_off;
+ in_off = 0;
+
+ do {
+ rte_iova_t frag_addr = next_frag;
+ size_t frag_len;
+
+ frag_len = RTE_MIN(seg_len,
+ SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ next_frag += frag_len;
+ seg_len -= frag_len;
+
+ eop = (seg_len == 0 && m_seg->next == NULL);
+
+ id = (*added) & txq->ptr_mask;
+ (*added)++;
+
+ /*
+ * Initially we assume that one DMA descriptor is needed
+ * for every segment. When a segment is split across
+ * several DMA descriptors, increase the estimate.
+ */
+ needed_desc += (seg_len != 0);
+
+ /*
+ * Stop if no more descriptors can be added, but not all
+ * segments have been processed yet.
+ */
+ if (*added - pkt_start == *dma_desc_space &&
+ !eop &&
+ !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
+ dma_desc_space, reap_done)) {
+ struct rte_mbuf *m;
+ struct rte_mbuf *m_next;
+
+ if (txq->completed != pkt_start) {
+ unsigned int i;
+
+ /*
+ * Reset mbuf associations with added
+ * descriptors.
+ */
+ for (i = pkt_start; i != *added; i++) {
+ id = i & txq->ptr_mask;
+ txq->sw_ring[id].mbuf = NULL;
+ }
+ return ENOSPC;
+ }
+
+ /* Free the segments that cannot be sent */
+ for (m = m_seg->next; m != NULL; m = m_next) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ }
+ eop = true;
+ /* Ignore the rest of the segment */
+ seg_len = 0;
+ }
+
+ sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
+ eop, &txq->txq_hw_ring[id]);
+
+ } while (seg_len != 0);
+
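+ /*
+ * Associate the segment mbuf with its last DMA descriptor so that
+ * it is freed when the descriptor is reaped.
+ */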
+ txq->sw_ring[id].mbuf = m_seg;
+
+ m_seg = m_seg->next;
+ } while (!eop);
+
+ /*
+ * Free the segments whose content was entirely copied to the TSO
+ * header memory of the Tx queue.
+ */
+ for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
+ struct rte_mbuf *seg_to_free = m_seg;
+
+ m_seg = m_seg->next;
+ rte_pktmbuf_free_seg(seg_to_free);
+ }
+
+ return 0;
+}
+