+/**
+ * Tx prepare callback (rte_eth_tx_prepare) for the EF10 native datapath.
+ *
+ * Validates packets in @p tx_pkts for transmission. Returns the number of
+ * packets that passed validation; on the first invalid packet, rte_errno
+ * is set and the index of that packet is returned.
+ */
+static uint16_t
+sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		      uint16_t nb_pkts)
+{
+	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+	uint16_t i;
+
+	for (i = 0; i < nb_pkts; i++) {
+		struct rte_mbuf *m = tx_pkts[i];
+		int ret;
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+		/*
+		 * In non-TSO case, check that a packet segments do not exceed
+		 * the size limit. Perform the check in debug mode since MTU
+		 * more than 9k is not supported, but the limit here is 16k-1.
+		 */
+		if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+			struct rte_mbuf *m_seg;
+
+			for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
+				if (m_seg->data_len >
+				    SFC_EF10_TX_DMA_DESC_LEN_MAX) {
+					rte_errno = EINVAL;
+					break;
+				}
+			}
+			/*
+			 * Non-NULL m_seg means the loop above broke on an
+			 * oversized segment. Stop preparing here so that the
+			 * index of the invalid packet is returned to the
+			 * caller with rte_errno set, instead of falling
+			 * through to further validation which could report
+			 * the packet as successfully prepared.
+			 */
+			if (m_seg != NULL)
+				break;
+		}
+#endif
+		ret = sfc_dp_tx_prepare_pkt(m, 0, SFC_TSOH_STD_LEN,
+					    txq->tso_tcp_header_offset_limit,
+					    txq->max_fill_level,
+					    SFC_EF10_TSO_OPT_DESCS_NUM, 0);
+		if (unlikely(ret != 0)) {
+			rte_errno = ret;
+			break;
+		}
+	}
+
+	return i;
+}
+