}
#endif
-#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
-#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
-#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
-#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK 0x03
-#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX 0x01
-
static inline uint64_t
i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
{
return ctx_desc;
}
+/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
+#define I40E_MAX_DATA_PER_TXD \
+ (I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
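+/*
+ * With the i40e Tx descriptor layout this works out to 0x3FFF, i.e.
+ * 16383 bytes, matching the (16K-1)B limit noted above.
+ */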
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+ struct rte_mbuf *txd = tx_pkt;
+ uint16_t count = 0;
+
+ while (txd != NULL) {
+ count += DIV_ROUND_UP(txd->data_len, I40E_MAX_DATA_PER_TXD);
+ txd = txd->next;
+ }
+
+ return count;
+}
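+/*
+ * Worked example (hypothetical segment lengths): a TSO mbuf chain of
+ * 8000B + 20000B + 4000B needs 1 + 2 + 1 = 4 data descriptors, since
+ * the 20000B segment exceeds I40E_MAX_DATA_PER_TXD and must be split
+ * across two descriptors.
+ */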
+
uint16_t
i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
 * The number of descriptors that must be allocated for
 * a packet equals the number of segments of that packet,
 * plus 1 context descriptor if needed.
+ * When TSO is enabled, recalculate the number of tx descs needed,
+ * in case an mbuf's data size exceeds the max data size the hw
+ * allows per tx desc.
*/
- nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ if (ol_flags & PKT_TX_TCP_SEG)
+ nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
+ nb_ctx);
+ else
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
tx_last = (uint16_t)(tx_id + nb_used - 1);
/* Circular ring */
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
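+ /*
+ * A single TSO segment larger than I40E_MAX_DATA_PER_TXD has to be
+ * spread over several data descriptors: each iteration below programs
+ * one full-sized chunk and advances the DMA address and ring position.
+ */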
+ while ((ol_flags & PKT_TX_TCP_SEG) &&
+ unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
+ txd->buffer_addr =
+ rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz =
+ i40e_build_ctob(td_cmd,
+ td_offset, I40E_MAX_DATA_PER_TXD,
+ td_tag);
+
+ buf_dma_addr += I40E_MAX_DATA_PER_TXD;
+ slen -= I40E_MAX_DATA_PER_TXD;
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+ }
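+ /*
+ * The remaining part of the segment (now at most
+ * I40E_MAX_DATA_PER_TXD bytes) is programmed into the next
+ * descriptor by the unchanged per-segment code below.
+ */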
PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
"buf_dma_addr: %#"PRIx64";\n"
"td_cmd: %#x;\n"
struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *data = pf->dev_data;
- uint16_t buf_size, len;
+ uint16_t buf_size;
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
break;
}
- len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
- rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
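+ /*
+ * Cap the Rx frame size by both the HW chaining capability
+ * (rx_buf_chain_len buffers of rx_buf_len bytes each) and the
+ * configured max_rx_pkt_len.
+ */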
+ rxq->max_pkt_len =
+ RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
+ rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
}
}
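+/*
+ * Map each Rx burst function pointer to a human readable name, so that
+ * i40e_rx_burst_mode_get() can report which Rx path is currently in use.
+ */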
+static const struct {
+ eth_rx_burst_t pkt_burst;
+ const char *info;
+} i40e_rx_burst_infos[] = {
+ { i40e_recv_scattered_pkts, "Scalar Scattered" },
+ { i40e_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
+ { i40e_recv_pkts, "Scalar" },
+#ifdef RTE_ARCH_X86
+ { i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
+ { i40e_recv_pkts_vec_avx2, "Vector AVX2" },
+ { i40e_recv_scattered_pkts_vec, "Vector SSE Scattered" },
+ { i40e_recv_pkts_vec, "Vector SSE" },
+#elif defined(RTE_ARCH_ARM64)
+ { i40e_recv_scattered_pkts_vec, "Vector Neon Scattered" },
+ { i40e_recv_pkts_vec, "Vector Neon" },
+#elif defined(RTE_ARCH_PPC_64)
+ { i40e_recv_scattered_pkts_vec, "Vector AltiVec Scattered" },
+ { i40e_recv_pkts_vec, "Vector AltiVec" },
+#endif
+};
+
+int
+i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+ int ret = -EINVAL;
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(i40e_rx_burst_infos); ++i) {
+ if (pkt_burst == i40e_rx_burst_infos[i].pkt_burst) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ i40e_rx_burst_infos[i].info);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
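+/*
+ * Usage sketch (application side, assuming a configured port and queue):
+ *
+ *	struct rte_eth_burst_mode mode;
+ *
+ *	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
+ *		printf("Rx burst mode: %s\n", mode.info);
+ */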
+
void __attribute__((cold))
i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
{
}
}
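+/*
+ * Tx counterpart of i40e_rx_burst_infos: map each Tx burst function
+ * pointer to a human readable name for i40e_tx_burst_mode_get().
+ */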
+static const struct {
+ eth_tx_burst_t pkt_burst;
+ const char *info;
+} i40e_tx_burst_infos[] = {
+ { i40e_xmit_pkts_simple, "Scalar Simple" },
+ { i40e_xmit_pkts, "Scalar" },
+#ifdef RTE_ARCH_X86
+ { i40e_xmit_pkts_vec_avx2, "Vector AVX2" },
+ { i40e_xmit_pkts_vec, "Vector SSE" },
+#elif defined(RTE_ARCH_ARM64)
+ { i40e_xmit_pkts_vec, "Vector Neon" },
+#elif defined(RTE_ARCH_PPC_64)
+ { i40e_xmit_pkts_vec, "Vector AltiVec" },
+#endif
+};
+
+int
+i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ int ret = -EINVAL;
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(i40e_tx_burst_infos); ++i) {
+ if (pkt_burst == i40e_tx_burst_infos[i].pkt_burst) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ i40e_tx_burst_infos[i].info);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
void __attribute__((cold))
i40e_set_default_ptype_table(struct rte_eth_dev *dev)
{