X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx.h;h=1c23c7541e91871e12fbc002ffe8912649b3d2e3;hb=62024eb8275696bead35b38a6062a2513f1f7c58;hp=64e9f20dd218b01be90cc1982bb301cd14563149;hpb=2d5f6953d56d13a6cea916d5d8384bfc07041991;p=dpdk.git

diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 64e9f20dd2..1c23c7541e 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -20,9 +20,9 @@
 #define ICE_CHK_Q_ENA_INTERVAL_US 100
 
 #ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-#define ice_rx_desc ice_16byte_rx_desc
+#define ice_rx_flex_desc ice_16b_rx_flex_desc
 #else
-#define ice_rx_desc ice_32byte_rx_desc
+#define ice_rx_flex_desc ice_32b_rx_flex_desc
 #endif
 
 #define ICE_SUPPORT_CHAIN_NUM 5
@@ -36,8 +36,15 @@
 #define ICE_TX_MAX_FREE_BUF_SZ 64
 #define ICE_DESCS_PER_LOOP 4
 
+#define ICE_FDIR_PKT_LEN 512
+
+#define ICE_RXDID_COMMS_OVS 22
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
+typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
+					struct rte_mbuf *mb,
+					volatile union ice_rx_flex_desc *rxdp);
 
 struct ice_rx_entry {
 	struct rte_mbuf *mbuf;
@@ -45,8 +52,8 @@ struct ice_rx_entry {
 
 struct ice_rx_queue {
 	struct rte_mempool *mp; /* mbuf pool to populate RX ring */
-	volatile union ice_rx_desc *rx_ring;/* RX ring virtual address */
-	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
+	volatile union ice_rx_flex_desc *rx_ring;/* RX ring virtual address */
+	rte_iova_t rx_ring_dma; /* RX ring DMA address */
 	struct ice_rx_entry *sw_ring; /* address of RX soft ring */
 	uint16_t nb_rx_desc; /* number of RX descriptors */
 	uint16_t rx_free_thresh; /* max free RX desc to hold */
@@ -64,8 +71,9 @@ struct ice_rx_queue {
 	uint16_t rxrearm_start; /**< the idx we start the re-arming from */
 	uint64_t mbuf_initializer; /**< value to init mbufs */
 
-	uint8_t port_id; /* device port ID */
+	uint16_t port_id; /* device port ID */
 	uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+	uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */
 	uint16_t queue_id; /* RX queue index */
 	uint16_t reg_idx; /* RX queue register index */
 	uint8_t drop_en; /* if not 0, set register bit */
@@ -76,6 +84,9 @@ struct ice_rx_queue {
 	uint16_t max_pkt_len; /* Maximum packet length */
 	bool q_set; /* indicate if rx queue has been configured */
 	bool rx_deferred_start; /* don't start this queue in dev start */
+	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
+	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
+	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 };
 
@@ -87,7 +98,7 @@ struct ice_tx_entry {
 
 struct ice_tx_queue {
 	uint16_t nb_tx_desc; /* number of TX descriptors */
-	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
+	rte_iova_t tx_ring_dma; /* TX ring DMA address */
 	volatile struct ice_tx_desc *tx_ring; /* TX ring virtual address */
 	struct ice_tx_entry *sw_ring; /* virtual address of SW ring */
 	uint16_t tx_tail; /* current value of tail register */
@@ -106,7 +117,7 @@ struct ice_tx_queue {
 	uint8_t pthresh; /**< Prefetch threshold register. */
 	uint8_t hthresh; /**< Host threshold register. */
 	uint8_t wthresh; /**< Write-back threshold reg. */
-	uint8_t port_id; /* Device port identifier. */
+	uint16_t port_id; /* Device port identifier. */
 	uint16_t queue_id; /* TX queue index. */
 	uint32_t q_teid; /* TX schedule node id. */
 	uint16_t reg_idx;
@@ -132,6 +143,46 @@ union ice_tx_offload {
 	};
 };
 
+/* Rx Flex Descriptor for Comms Package Profile
+ * RxDID Profile ID 22 (swap Hash and FlowID)
+ * Flex-field 0: Flow ID lower 16-bits
+ * Flex-field 1: Flow ID upper 16-bits
+ * Flex-field 2: RSS hash lower 16-bits
+ * Flex-field 3: RSS hash upper 16-bits
+ * Flex-field 4: AUX0
+ * Flex-field 5: AUX1
+ */
+struct ice_32b_rx_flex_desc_comms_ovs {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 flow_id;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 rss_hash;
+	union {
+		struct {
+			__le16 aux0;
+			__le16 aux1;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
 int ice_rx_queue_setup(struct rte_eth_dev *dev,
 		       uint16_t queue_idx,
 		       uint16_t nb_desc,
@@ -147,10 +198,15 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void ice_rx_queue_release(void *rxq);
 void ice_tx_queue_release(void *txq);
-void ice_clear_queues(struct rte_eth_dev *dev);
 void ice_free_queues(struct rte_eth_dev *dev);
+int ice_fdir_setup_tx_resources(struct ice_pf *pf);
+int ice_fdir_setup_rx_resources(struct ice_pf *pf);
 uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		       uint16_t nb_pkts);
 uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -158,12 +214,18 @@ uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 void ice_set_rx_function(struct rte_eth_dev *dev);
 uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		       uint16_t nb_pkts);
+void ice_set_tx_function_flag(struct rte_eth_dev *dev,
+			      struct ice_tx_queue *txq);
 void ice_set_tx_function(struct rte_eth_dev *dev);
 uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 		      struct rte_eth_rxq_info *qinfo);
 void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 		      struct rte_eth_txq_info *qinfo);
+int ice_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
+			  struct rte_eth_burst_mode *mode);
+int ice_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
+			  struct rte_eth_burst_mode *mode);
 int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
 void ice_set_default_ptype_table(struct rte_eth_dev *dev);
@@ -186,4 +248,36 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
 					  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
+int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
+int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
+#define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
+	int i; \
+	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
+		struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+		if (!rxq) \
+			continue; \
+		rxq->fdir_enabled = on; \
+	} \
+	PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
+} while (0)
+
+/* Enable/disable flow director parsing from Rx descriptor in data path. */
+static inline void
+ice_fdir_rx_parsing_enable(struct ice_adapter *ad, bool on)
+{
+	if (on) {
+		/* Enable flow director parsing from Rx descriptor */
+		FDIR_PARSING_ENABLE_PER_QUEUE(ad, on);
+		ad->fdir_ref_cnt++;
+	} else {
+		if (ad->fdir_ref_cnt >= 1) {
+			ad->fdir_ref_cnt--;
+
+			if (ad->fdir_ref_cnt == 0)
+				FDIR_PARSING_ENABLE_PER_QUEUE(ad, on);
+		}
+	}
+}
+
 #endif /* _ICE_RXTX_H_ */
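
For reference, a minimal sketch of a handler matching the new ice_rxd_to_pkt_fields_t callback for the RXDID 22 (comms OVS) profile above. The function name here is hypothetical; the status bit is the existing ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S from the shared base code (ice_lan_tx_rx.h) and the offload flags come from rte_mbuf.h, but the driver's real handlers live in ice_rxtx.c and may differ in detail:

static void
ice_rxd_to_pkt_fields_comms_ovs(struct ice_rx_queue *rxq __rte_unused,
				struct rte_mbuf *mb,
				volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
		(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);

	/* RSS hash is taken from Qword 3 when the RSS valid bit is set */
	if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S)) {
		mb->ol_flags |= PKT_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	/* In this profile Qword 1 carries the FDIR flow ID; all-ones
	 * means no flow director match was reported for the packet.
	 */
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}
#endif
}

A queue configured for this RXDID would then store such a handler in rxq->rxd_to_pkt_fields and invoke it once per received descriptor.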
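
Note also that ice_fdir_rx_parsing_enable() is reference counted, so several flow director filters can share the per-queue fdir_enabled flag: parsing is switched on by the first filter and switched off only when the last one is torn down. A hypothetical call sequence from the flow create/destroy paths:

	ice_fdir_rx_parsing_enable(ad, true);  /* 1st flow created: ref_cnt 0->1, parsing on    */
	ice_fdir_rx_parsing_enable(ad, true);  /* 2nd flow created: ref_cnt 1->2                */
	ice_fdir_rx_parsing_enable(ad, false); /* 2nd flow destroyed: ref_cnt 2->1, still on    */
	ice_fdir_rx_parsing_enable(ad, false); /* 1st flow destroyed: ref_cnt 1->0, parsing off */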