#ifndef _IXGBE_RXTX_H_
#define _IXGBE_RXTX_H_
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must
+ * be a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary,
+ * which also optimizes cache line usage: the hardware supports cache line
+ * sizes of up to 128 bytes.
+ */
+#define IXGBE_ALIGN 128
+
+#define IXGBE_RXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
+#define IXGBE_TXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
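+
+/*
+ * A minimal sketch (hypothetical helper, not part of the driver API):
+ * rounding a requested descriptor count up to IXGBE_RXD_ALIGN keeps
+ * RDLEN (count * descriptor size) a multiple of IXGBE_ALIGN bytes.
+ */
+static inline uint16_t
+ixgbe_example_round_rxd(uint16_t nb_desc)
+{
+	return (uint16_t)(((nb_desc + IXGBE_RXD_ALIGN - 1) /
+			   IXGBE_RXD_ALIGN) * IXGBE_RXD_ALIGN);
+}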
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
+ * descriptors must satisfy the following condition:
+ * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 32
+#define IXGBE_MAX_RING_DESC 4096
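+
+/*
+ * A minimal sketch (hypothetical helper, not part of the driver API):
+ * with 16-byte advanced descriptors, IXGBE_RXD_ALIGN is 128 / 16 = 8,
+ * so a valid ring size is a multiple of 8 within [32, 4096].
+ */
+static inline int
+ixgbe_example_nb_desc_valid(uint16_t nb_desc)
+{
+	return nb_desc >= IXGBE_MIN_RING_DESC &&
+	       nb_desc <= IXGBE_MAX_RING_DESC &&
+	       nb_desc % IXGBE_RXD_ALIGN == 0;
+}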
#define RTE_PMD_IXGBE_TX_MAX_BURST 32
#define RTE_PMD_IXGBE_RX_MAX_BURST 32
+#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64
#define RTE_IXGBE_DESCS_PER_LOOP 4
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
#ifdef RTE_IXGBE_INC_VECTOR
-#define RTE_IXGBE_VPMD_RX_BURST 32
-#define RTE_IXGBE_VPMD_TX_BURST 32
-#define RTE_IXGBE_RXQ_REARM_THRESH RTE_IXGBE_VPMD_RX_BURST
-#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64
+#define RTE_IXGBE_RXQ_REARM_THRESH 32
+#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
#endif
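/*
 * The ring memory is sized with RTE_IXGBE_DESCS_PER_LOOP - 1 extra
 * descriptors so the vector RX path, which reads descriptors in groups
 * of RTE_IXGBE_DESCS_PER_LOOP, cannot read past the allocated area.
 */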
#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
		sizeof(union ixgbe_adv_rx_desc))
/** Offload features */
union ixgbe_tx_offload {
- uint64_t data;
+ uint64_t data[2];
struct {
uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
	uint64_t l3_len:9; /**< L3 (IP) Header Length. */
	uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
uint64_t tso_segsz:16; /**< TCP TSO segment size */
uint64_t vlan_tci:16;
/**< VLAN Tag Control Identifier (CPU order). */
+
+ /* fields for TX offloading of tunnels */
+ uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
+ uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
};
};
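
/*
 * A minimal sketch (hypothetical helper, not part of the driver API):
 * the data[2] overlay lets the TX path compare a packet's offload
 * fields against a cached context descriptor with two 64-bit compares
 * instead of checking every bit-field individually. The bit-fields
 * above total 72 bits, which is why two 64-bit words are needed.
 */
static inline int
ixgbe_example_tx_offload_equal(const union ixgbe_tx_offload *a,
			       const union ixgbe_tx_offload *b)
{
	return a->data[0] == b->data[0] && a->data[1] == b->data[1];
}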