X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.h;h=476ef62cfda295bd4e71049e226c553064243801;hb=cd3b124955d4673fa0ddd423ebc01a5adf9501d4;hp=69c718bcac673d71167a143815a27f91849b546c;hpb=5566a3e35866ce9e5eacf886c27b460ebfcd6ee9;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718bcac..476ef62cfd 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -33,7 +33,7 @@
 
 #define RTE_IXGBE_DESCS_PER_LOOP    4
 
-#ifdef RTE_IXGBE_INC_VECTOR
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
 #define RTE_IXGBE_RXQ_REARM_THRESH      32
 #define RTE_IXGBE_MAX_RX_BURST          RTE_IXGBE_RXQ_REARM_THRESH
 #endif
@@ -53,6 +53,8 @@
 
 #define IXGBE_TX_MAX_SEG                    40
 
+#define IXGBE_TX_MIN_PKT_LEN                14
+
 #define IXGBE_PACKET_TYPE_MASK_82599        0X7F
 #define IXGBE_PACKET_TYPE_MASK_X550         0X10FF
 #define IXGBE_PACKET_TYPE_MASK_TUNNEL       0XFF
@@ -111,11 +113,11 @@ struct ixgbe_rx_queue {
 	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
 	uint8_t             rx_using_sse;
 	/**< indicates that vector RX is in use */
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	uint8_t             using_ipsec;
 	/**< indicates that IPsec RX feature is in use */
 #endif
-#ifdef RTE_IXGBE_INC_VECTOR
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
 	uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
 	uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
 #endif
@@ -127,8 +129,11 @@ struct ixgbe_rx_queue {
 	uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
+	/** UDP frames with a 0 checksum can be marked as checksum errors. */
+	uint8_t             rx_udp_csum_zero_err;
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
+	uint64_t            offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -158,7 +163,7 @@ union ixgbe_tx_offload {
 		/* fields for TX offloading of tunnels */
 		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
 		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 		/* inline ipsec related*/
 		uint64_t sa_idx:8;	/**< TX SA database entry index */
 		uint64_t sec_pad_len:4;	/**< padding length */
@@ -221,13 +226,13 @@ struct ixgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
 	const struct ixgbe_txq_ops *ops;       /**< txq ops */
 	uint8_t             tx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	uint8_t             using_ipsec;
 	/**< indicates that IPsec TX feature is in use */
 #endif
@@ -239,20 +244,6 @@ struct ixgbe_txq_ops {
 	void (*reset)(struct ixgbe_tx_queue *txq);
 };
 
-/*
- * The "simple" TX queue functions require that the following
- * flags are set when the TX queue is configured:
- *  - ETH_TXQ_FLAGS_NOMULTSEGS
- *  - ETH_TXQ_FLAGS_NOVLANOFFL
- *  - ETH_TXQ_FLAGS_NOXSUMSCTP
- *  - ETH_TXQ_FLAGS_NOXSUMUDP
- *  - ETH_TXQ_FLAGS_NOXSUMTCP
- * and that the RS bit threshold (tx_rs_thresh) is at least equal to
- * RTE_PMD_IXGBE_TX_MAX_BURST.
- */
-#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
-			    ETH_TXQ_FLAGS_NOOFFLOADS)
-
 /*
  * Populate descriptors with the following info:
  * 1.) buffer_addr = phys_addr + headroom
@@ -289,6 +280,7 @@ void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
  */
 void ixgbe_set_rx_function(struct rte_eth_dev *dev);
+int ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev);
 
 uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
@@ -296,15 +288,19 @@ uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
 int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
 void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
+int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 
 extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
 extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
 
-#ifdef RTE_IXGBE_INC_VECTOR
-
 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
-#endif /* RTE_IXGBE_INC_VECTOR */
+
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
+int ixgbe_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 
 #endif /* _IXGBE_RXTX_H_ */
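
The replacement of txq_flags with a 64-bit offloads field, together with the removal of IXGBE_SIMPLE_FLAGS, follows the ethdev-wide migration from ETH_TXQ_FLAGS_* to DEV_TX_OFFLOAD_* flags, which ixgbe now reports through ixgbe_get_tx_port_offloads()/ixgbe_get_tx_queue_offloads(). A minimal application-side sketch of the new convention; the port id, queue id, ring size, and the choice of the IPv4 checksum offload are illustrative and not part of this patch:

/*
 * Hedged sketch, not part of the patch: how an application requests
 * per-queue Tx offloads now that txq_flags is gone. Queue 0 and the
 * 512-descriptor ring are made-up example values.
 */
#include <rte_ethdev.h>

static int
example_tx_queue_setup(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Start from the PMD's default Tx configuration, then request
	 * an offload only if the port actually advertises it. */
	txconf = dev_info.default_txconf;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
		txconf.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;

	return rte_eth_tx_queue_setup(port_id, 0 /* queue */,
				      512 /* descriptors */,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}

Requested bits that the port did not advertise are rejected at queue setup time, which is why the sketch masks against tx_offload_capa instead of setting flags unconditionally.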
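The new ixgbe_get_monitor_addr() prototype is the PMD hook behind the experimental power-monitoring API introduced around DPDK 21.02, exposed to applications as rte_eth_get_monitor_addr() plus rte_power_monitor(). A hedged sketch of how a polling core could use it, assuming that API revision; the ~100 us wake-up budget is arbitrary:

/*
 * Hedged sketch of the experimental DPDK 21.02-era power-monitoring
 * API that ixgbe_get_monitor_addr() serves. The timeout value is an
 * assumption for illustration only.
 */
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>

static void
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;
	const uint64_t budget = rte_get_tsc_hz() / 10000; /* ~100 us */

	/* Ask the PMD which Rx descriptor address to arm the monitor on. */
	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
		/* Sleep until the NIC writes the next descriptor or the
		 * TSC deadline expires, whichever comes first. */
		rte_power_monitor(&pmc, rte_rdtsc() + budget);
}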