struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
};
-/**
- * continuous entry sequence, gather by the same mempool
- */
-struct igb_tx_entry_seq {
- const struct rte_mempool* pool;
- uint32_t same_pool;
-};
-
/**
* Structure associated with each RX queue.
*/
uint8_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
- uint8_t start_rx_per_q;
+ uint8_t rx_deferred_start; /**< If set, Rx queue is not started in dev start. */
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
};
/** Offload features */
-union ixgbe_vlan_macip {
- uint32_t data;
+union ixgbe_tx_offload {
+ uint64_t data;
struct {
- uint16_t l2_l3_len; /**< combined 9-bit l3, 7-bit l2 lengths */
- uint16_t vlan_tci;
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t vlan_tci:16;
/**< VLAN Tag Control Identifier (CPU order). */
- } f;
+ };
};
/*
struct ixgbe_advctx_info {
uint64_t flags; /**< ol_flags for context build. */
- uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- union ixgbe_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
+ /** tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union ixgbe_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union ixgbe_tx_offload tx_offload_mask;
};
/**
volatile union ixgbe_adv_tx_desc *tx_ring;
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
-#ifdef RTE_IXGBE_INC_VECTOR
- /** continuous tx entry sequence within the same mempool */
- struct igb_tx_entry_seq *sw_ring_seq;
-#endif
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_tail; /**< current value of TDT reg. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
struct ixgbe_txq_ops *ops; /**< txq ops */
- uint8_t start_tx_per_q;
+ uint8_t tx_deferred_start; /**< If set, Tx queue is not started in dev start. */
};
struct ixgbe_txq_ops {
IXGBE_ADVTXD_DCMD_EOP)
#ifdef RTE_IXGBE_INC_VECTOR
-uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq, unsigned int socket_id);
+uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
#endif