return ctx_desc;
}
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define ICE_MAX_DATA_PER_TXD \
+ (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+ struct rte_mbuf *txd = tx_pkt;
+ uint16_t count = 0;
+
+ while (txd != NULL) {
+ count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
+ txd = txd->next;
+ }
+
+ return count;
+}
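
ICE_MAX_DATA_PER_TXD therefore evaluates to the (16K-1)-byte limit named in the comment, i.e. 16383 bytes, so each mbuf segment contributes DIV_ROUND_UP(data_len, 16383) data descriptors. A minimal standalone sketch of that arithmetic (illustration only, not part of the patch; it uses local stand-ins for the driver macros and made-up segment sizes):

#include <stdint.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD 16383U	/* (16K - 1) bytes, local stand-in */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* A hypothetical 3-segment TSO mbuf chain. */
	const uint16_t data_len[] = { 9000, 45000, 16383 };
	uint16_t count = 0;

	for (unsigned int i = 0; i < 3; i++)
		count += DIV_ROUND_UP(data_len[i], MAX_DATA_PER_TXD);

	/* 9000 -> 1, 45000 -> 3, 16383 -> 1: five data descriptors. */
	printf("data descriptors needed: %u\n", (unsigned int)count);
	return 0;
}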
+
uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
uint32_t td_offset = 0;
uint32_t td_tag = 0;
uint16_t tx_last;
+ uint16_t slen;
uint64_t buf_dma_addr;
uint64_t ol_flags;
union ice_tx_offload tx_offload = {0};
/* The number of descriptors that must be allocated for
* a packet equals to the number of the segments of that
* packet plus the number of context descriptor if needed.
+ * Recalculate the number of needed TX descriptors when TSO is
+ * enabled, in case the mbuf data size exceeds the max data size
+ * that HW allows per TX descriptor.
*/
- nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ if (ol_flags & PKT_TX_TCP_SEG)
+ nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
+ nb_ctx);
+ else
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
tx_last = (uint16_t)(tx_id + nb_used - 1);
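
As a worked example with hypothetical numbers: a TSO packet held in a single 60000-byte mbuf with one context descriptor gives ice_calc_pkt_desc() = DIV_ROUND_UP(60000, 16383) = 4, so nb_used = 4 + 1 = 5 ring entries, whereas the old nb_segs-based formula would have reserved only 1 + 1 = 2 even though the split loop below writes four data descriptors for that segment.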
/* Circular ring */
txe->mbuf = m_seg;
/* Setup TX Descriptor */
+ slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
+ while ((ol_flags & PKT_TX_TCP_SEG) &&
+ unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
+ txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((uint64_t)ICE_MAX_DATA_PER_TXD <<
+ ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+
+ buf_dma_addr += ICE_MAX_DATA_PER_TXD;
+ slen -= ICE_MAX_DATA_PER_TXD;
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ txd = &tx_ring[tx_id];
+ txn = &sw_ring[txe->next_id];
+ }
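
The loop above emits one full-size data descriptor per ICE_MAX_DATA_PER_TXD-byte chunk, advancing the DMA address and the ring bookkeeping each time, and leaves the final remainder for the unchanged descriptor setup that follows. A rough standalone sketch of just that chunking (illustration only; local names, not the driver's ring structures):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD 16383U	/* hypothetical local copy of the HW limit */

/* Print the (addr, len) pairs one oversized segment would be split into. */
static void split_segment(uint64_t dma_addr, unsigned int slen)
{
	while (slen > MAX_DATA_PER_TXD) {
		printf("data desc: addr=0x%" PRIx64 " len=%u\n",
		       dma_addr, MAX_DATA_PER_TXD);
		dma_addr += MAX_DATA_PER_TXD;
		slen -= MAX_DATA_PER_TXD;
	}
	/* The remainder goes into the normal descriptor write. */
	printf("data desc: addr=0x%" PRIx64 " len=%u\n", dma_addr, slen);
}

int main(void)
{
	split_segment(0x100000, 45000);	/* 16383 + 16383 + 12234 */
	return 0;
}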
+
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
- ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
- ((uint64_t)m_seg->data_len <<
- ICE_TXD_QW1_TX_BUF_SZ_S) |
- ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+ ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
txe->last_id = tx_last;
tx_id = txe->next_id;