diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 9fa57b3b27..99096e4c21 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -31,7 +31,7 @@
 #define ICE_VPMD_RX_BURST           32
 #define ICE_VPMD_TX_BURST           32
-#define ICE_RXQ_REARM_THRESH        32
+#define ICE_RXQ_REARM_THRESH        64
 #define ICE_MAX_RX_BURST            ICE_RXQ_REARM_THRESH
 #define ICE_TX_MAX_FREE_BUF_SZ      64
 #define ICE_DESCS_PER_LOOP          4
 
@@ -42,6 +42,9 @@
 
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
+typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
+					struct rte_mbuf *mb,
+					volatile union ice_rx_flex_desc *rxdp);
 
 struct ice_rx_entry {
 	struct rte_mbuf *mbuf;
@@ -68,7 +71,7 @@ struct ice_rx_queue {
 	uint16_t rxrearm_start; /**< the idx we start the re-arming from */
 	uint64_t mbuf_initializer; /**< value to init mbufs */
 
-	uint8_t port_id; /* device port ID */
+	uint16_t port_id; /* device port ID */
 	uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
 	uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */
 	uint16_t queue_id; /* RX queue index */
@@ -82,6 +85,8 @@
 	bool q_set; /* indicate if rx queue has been configured */
 	bool rx_deferred_start; /* don't start this queue in dev start */
 	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
+	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
+	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 };
 
@@ -91,6 +96,10 @@ struct ice_tx_entry {
 	uint16_t last_id;
 };
 
+struct ice_vec_tx_entry {
+	struct rte_mbuf *mbuf;
+};
+
 struct ice_tx_queue {
 	uint16_t nb_tx_desc; /* number of TX descriptors */
 	rte_iova_t tx_ring_dma; /* TX ring DMA address */
@@ -112,7 +121,7 @@
 	uint8_t pthresh; /**< Prefetch threshold register. */
 	uint8_t hthresh; /**< Host threshold register. */
 	uint8_t wthresh; /**< Write-back threshold reg. */
-	uint8_t port_id; /* Device port identifier. */
+	uint16_t port_id; /* Device port identifier. */
 	uint16_t queue_id; /* TX queue index. */
 	uint32_t q_teid; /* TX schedule node id. */
 	uint16_t reg_idx;
@@ -225,6 +234,8 @@ int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
 void ice_set_default_ptype_table(struct rte_eth_dev *dev);
 const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
+					  uint32_t rxdid);
 
 int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
@@ -243,8 +254,16 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
 					  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
+				  uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
+					    struct rte_mbuf **rx_pkts,
+					    uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+				  uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
+int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 
 #define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
 	int i; \
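
The most structural change above is the new ice_rxd_to_pkt_fields_t callback: rather
than branching on the RXDID (flexible Rx descriptor profile) for every received packet,
each queue stores one descriptor-parsing handler, selected once at queue setup by
ice_select_rxd_to_pkt_fields_handler(rxq, rxdid). The sketch below illustrates that
dispatch pattern in isolation; every toy_* name and the RXDID value 2 are invented
stand-ins for this example, not DPDK definitions.

/*
 * Minimal sketch (not the driver's actual code) of the per-queue
 * handler-dispatch pattern behind ice_rxd_to_pkt_fields_t.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct toy_flex_desc {			/* stand-in for union ice_rx_flex_desc */
	uint16_t flow_id;
	uint32_t rss_hash;
};

struct toy_mbuf {			/* stand-in for struct rte_mbuf */
	uint32_t hash_rss;
	uint32_t fdir_id;
};

struct toy_rx_queue;

/* same shape as the ice_rxd_to_pkt_fields_t typedef in the patch */
typedef void (*toy_rxd_to_pkt_fields_t)(struct toy_rx_queue *rxq,
					struct toy_mbuf *mb,
					volatile struct toy_flex_desc *rxdp);

struct toy_rx_queue {
	toy_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* per-queue handler */
};

/* handler for a base descriptor profile: RSS hash only */
static void
toy_rxd_to_pkt_fields_generic(struct toy_rx_queue *rxq, struct toy_mbuf *mb,
			      volatile struct toy_flex_desc *rxdp)
{
	(void)rxq;
	mb->hash_rss = rxdp->rss_hash;
}

/* handler for a richer profile that also carries a flow ID */
static void
toy_rxd_to_pkt_fields_flow_id(struct toy_rx_queue *rxq, struct toy_mbuf *mb,
			      volatile struct toy_flex_desc *rxdp)
{
	(void)rxq;
	mb->hash_rss = rxdp->rss_hash;
	mb->fdir_id = rxdp->flow_id;
}

/* run once at queue setup, analogous to
 * ice_select_rxd_to_pkt_fields_handler(rxq, rxdid) */
static void
toy_select_handler(struct toy_rx_queue *rxq, uint32_t rxdid)
{
	switch (rxdid) {
	case 2:	/* toy "flow ID" profile number */
		rxq->rxd_to_pkt_fields = toy_rxd_to_pkt_fields_flow_id;
		break;
	default:
		rxq->rxd_to_pkt_fields = toy_rxd_to_pkt_fields_generic;
		break;
	}
}

int
main(void)
{
	struct toy_rx_queue rxq;
	struct toy_flex_desc desc = { .flow_id = 7, .rss_hash = 0xabcd };
	struct toy_mbuf mb = { 0, 0 };

	toy_select_handler(&rxq, 2);
	/* hot path: one indirect call per packet, no RXDID branching */
	rxq.rxd_to_pkt_fields(&rxq, &mb, &desc);
	printf("rss=0x%" PRIx32 " flow=%" PRIu32 "\n", mb.hash_rss, mb.fdir_id);
	return 0;
}

The remaining hunks are independent of one another: ICE_RXQ_REARM_THRESH doubles to 64
(and ICE_MAX_RX_BURST follows it), port_id in both queue structs widens to uint16_t to
match ethdev's 16-bit port numbering, the *_vec_avx512 prototypes and the lighter
ice_vec_tx_entry back the new AVX512 vector RX/TX paths, and ice_get_monitor_addr()
appears to be the PMD-side hook for the ethdev get_monitor_addr callback used by
rte_power_monitor()-based Rx-queue power management.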