X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fiavf%2Fiavf_rxtx.h;h=3d02c6589d660096baa52ecfc15a4d803f81b19b;hb=c1d4e9d37abdc6c07a05f7d96928e624fea9ebb5;hp=8246e797f55cebdca893b2b9bb79ef8edb04bd4c;hpb=b8b4c54ef9b03a4774fa82b4ec7a811d0900c0ab;p=dpdk.git

diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 8246e797f5..3d02c6589d 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -104,6 +104,7 @@ struct iavf_rx_queue {
 
 	uint16_t port_id;        /* device port ID */
 	uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
+	uint8_t fdir_enabled;   /* 0 if FDIR disabled, 1 when enabled */
 	uint16_t queue_id;      /* Rx queue index */
 	uint16_t rx_buf_len;    /* The packet buffer size */
 	uint16_t rx_hdr_len;    /* The header buffer size */
@@ -378,6 +379,7 @@ int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			    const struct rte_eth_txconf *tx_conf);
 int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
 void iavf_dev_tx_queue_release(void *txq);
 void iavf_stop_queues(struct rte_eth_dev *dev);
 uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -407,16 +409,27 @@ int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
 
 uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			    uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+				     uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
 				      struct rte_mbuf **rx_pkts,
 				      uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
+					       struct rte_mbuf **rx_pkts,
+					       uint16_t nb_pkts);
 uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				   uint16_t nb_pkts);
 uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
 				 uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
+					  struct rte_mbuf **rx_pkts,
+					  uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
 					   struct rte_mbuf **rx_pkts,
 					   uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
+						    struct rte_mbuf **rx_pkts,
+						    uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			    uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -480,6 +493,35 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	       tx_desc->cmd_type_offset_bsz);
 }
 
+#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
+	int i; \
+	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
+		struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+		if (!rxq) \
+			continue; \
+		rxq->fdir_enabled = on; \
+	} \
+	PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
+} while (0)
+
+/* Enable/disable flow director Rx processing in data path. */
+static inline
+void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
+{
+	if (on) {
+		/* enable flow director processing */
+		FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+		ad->fdir_ref_cnt++;
+	} else {
+		if (ad->fdir_ref_cnt >= 1) {
+			ad->fdir_ref_cnt--;
+
+			if (ad->fdir_ref_cnt == 0)
+				FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+		}
+	}
+}
+
 #ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
 #define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
 	iavf_dump_rx_descriptor(rxq, desc, rx_id)
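
Note on intended usage of the new helper: `fdir_ref_cnt` keeps the per-queue `fdir_enabled` flag set for as long as at least one flow director rule is installed, so the Rx burst functions only look at FDIR metadata when it can actually be present. The sketch below shows the expected call pattern from rule add/delete paths; the function names `iavf_fdir_add_rule()`/`iavf_fdir_del_rule()` are hypothetical placeholders (the real callers live in the flow director code, not in this header), and driver headers are assumed to be included.

```c
/* Sketch only: hypothetical add/del paths showing how
 * iavf_fdir_rx_proc_enable() keeps rxq->fdir_enabled in sync with the
 * number of installed flow director rules.
 */
static int
iavf_fdir_add_rule(struct iavf_adapter *ad /*, rule arguments */)
{
	/* ... program the rule in hardware ... */

	/* Each successful add bumps fdir_ref_cnt and (re)sets the
	 * per-queue fdir_enabled flag checked in the Rx data path.
	 */
	iavf_fdir_rx_proc_enable(ad, true);
	return 0;
}

static int
iavf_fdir_del_rule(struct iavf_adapter *ad /*, rule arguments */)
{
	/* ... remove the rule from hardware ... */

	/* The per-queue flag is cleared only when the last rule is
	 * removed and fdir_ref_cnt drops back to zero.
	 */
	iavf_fdir_rx_proc_enable(ad, false);
	return 0;
}
```

The reference count is what lets many rules share a single per-queue flag: deleting one rule does not disable the fast-path check while other rules remain active.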