X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ffm10k%2Ffm10k.h;h=060982b1082e9e27b3f548a3b7193dd46db3cec8;hb=1e3a39f72d5d088cd3038241d799dd102e50f0d2;hp=439e95fd2e00363c80a3d93569b4fa9e8979efc0;hpb=8b5c9ec20b7b48784d5a41db7506b4e514950d1d;p=dpdk.git

diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 439e95fd2e..060982b108 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -69,6 +69,9 @@
 #define FM10K_MAX_RX_DESC  (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc))
 #define FM10K_MAX_TX_DESC  (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc))
 
+#define FM10K_TX_MAX_SEG      UINT8_MAX
+#define FM10K_TX_MAX_MTU_SEG  UINT8_MAX
+
 /*
  * byte aligment for HW RX data buffer
  * Datasheet requires RX buffer addresses shall either be 512-byte aligned or
@@ -123,6 +126,18 @@
 #define FM10K_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
 #define FM10K_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
 
+#define RTE_FM10K_RXQ_REARM_THRESH      32
+#define RTE_FM10K_VPMD_TX_BURST         32
+#define RTE_FM10K_MAX_RX_BURST          RTE_FM10K_RXQ_REARM_THRESH
+#define RTE_FM10K_TX_MAX_FREE_BUF_SZ    64
+#define RTE_FM10K_DESCS_PER_LOOP        4
+
+#define FM10K_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
+#define FM10K_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
+
+#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+				ETH_TXQ_FLAGS_NOOFFLOADS)
+
 struct fm10k_macvlan_filter_info {
 	uint16_t vlan_num;       /* Total VLAN number */
 	uint16_t mac_num;        /* Total mac number */
@@ -138,6 +153,8 @@ struct fm10k_dev_info {
 	/* Protect the mailbox to avoid race condition */
 	rte_spinlock_t mbx_lock;
 	struct fm10k_macvlan_filter_info macvlan;
+	/* Flag to indicate if RX vector conditions satisfied */
+	bool rx_vec_allowed;
 };
 
 /*
@@ -168,19 +185,29 @@ struct fm10k_rx_queue {
 	struct rte_mempool *mp;
 	struct rte_mbuf **sw_ring;
 	volatile union fm10k_rx_desc *hw_ring;
-	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
-	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+	struct rte_mbuf *pkt_first_seg; /* First segment of current packet. */
+	struct rte_mbuf *pkt_last_seg; /* Last segment of current packet. */
 	uint64_t hw_ring_phys_addr;
+	uint64_t mbuf_initializer; /* value to init mbufs */
+	/* need to alloc dummy mbuf, for wraparound when scanning hw ring */
+	struct rte_mbuf fake_mbuf;
 	uint16_t next_dd;
 	uint16_t next_alloc;
 	uint16_t next_trigger;
 	uint16_t alloc_thresh;
 	volatile uint32_t *tail_ptr;
 	uint16_t nb_desc;
+	/* Number of faked desc added at the tail for Vector RX function */
+	uint16_t nb_fake_desc;
 	uint16_t queue_id;
-	uint8_t port_id;
+	/* Below 2 fields only valid in case vPMD is applied. */
+	uint16_t rxrearm_nb;    /* number of remaining to be re-armed */
+	uint16_t rxrearm_start; /* the idx we start the re-arming from */
+	uint16_t rx_using_sse;  /* indicates that vector RX is in use */
+	uint16_t port_id;
 	uint8_t drop_en;
-	uint8_t rx_deferred_start; /**< don't start this queue in dev start. */
+	uint8_t rx_deferred_start; /* don't start this queue in dev start. */
+	uint16_t rx_ftag_en; /* indicates FTAG RX supported */
 };
 
 /*
@@ -194,22 +221,34 @@ struct fifo {
 	uint16_t *endp;
 };
 
+struct fm10k_txq_ops;
+
 struct fm10k_tx_queue {
 	struct rte_mbuf **sw_ring;
 	struct fm10k_tx_desc *hw_ring;
 	uint64_t hw_ring_phys_addr;
 	struct fifo rs_tracker;
+	const struct fm10k_txq_ops *ops; /* txq ops */
 	uint16_t last_free;
 	uint16_t next_free;
 	uint16_t nb_free;
 	uint16_t nb_used;
 	uint16_t free_thresh;
	uint16_t rs_thresh;
+	/* Below 2 fields only valid in case vPMD is applied. */
+	uint16_t next_rs; /* Next pos to set RS flag */
+	uint16_t next_dd; /* Next pos to check DD flag */
 	volatile uint32_t *tail_ptr;
+	uint32_t txq_flags; /* Holds flags for this TXq */
 	uint16_t nb_desc;
-	uint8_t port_id;
-	uint8_t tx_deferred_start; /** < don't start this queue in dev start. */
+	uint16_t port_id;
+	uint8_t tx_deferred_start; /** don't start this queue in dev start. */
 	uint16_t queue_id;
+	uint16_t tx_ftag_en; /* indicates FTAG TX supported */
+};
+
+struct fm10k_txq_ops {
+	void (*reset)(struct fm10k_tx_queue *txq);
 };
 
 #define MBUF_DMA_ADDR(mb) \
@@ -250,7 +289,7 @@ static inline uint16_t fifo_remove(struct fifo *fifo)
 }
 
 static inline void
-fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint8_t in_port)
+fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint16_t in_port)
 {
 	rte_mbuf_refcnt_set(mb, 1);
 	mb->next = NULL;
@@ -314,6 +353,24 @@ uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts);
 
+int
+fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
 uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts);
+
+uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+	uint16_t nb_pkts);
+
+int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
+int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
+void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
+uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **,
+					uint16_t);
+uint16_t fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+					uint16_t nb_pkts);
+void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
+int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq);
+
 #endif
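
Note: the header changes above only declare the vPMD hooks; the choice between the scalar and vector burst functions is made in the driver's C code (loosely modelled on the fm10k_set_rx_function()/fm10k_set_tx_function() logic in fm10k_ethdev.c). The following is a minimal, illustrative sketch of how the pieces could be wired together at configure time, not the driver's actual implementation: fm10k_pick_burst_functions(), fm10k_txq_vec_reset_stub() and fm10k_vec_txq_ops are hypothetical names introduced for the example, FM10K_DEV_PRIVATE_TO_INFO() is assumed to be the private-data accessor already provided by fm10k.h, and the condition-check helpers are assumed to return 0 when the vector path is usable.

/*
 * Illustrative sketch only: wiring the vPMD condition checks, setup
 * functions and the per-queue txq ops hook declared in fm10k.h.
 */
#include <stdbool.h>
#include <rte_ethdev.h>
#include "fm10k.h"

/* Placeholder reset callback for the example; a real implementation would
 * also rewrite the TX descriptor ring and restore nb_free/next_rs/next_dd
 * to their defaults. */
static void
fm10k_txq_vec_reset_stub(struct fm10k_tx_queue *txq)
{
	txq->next_free = 0;
	txq->nb_used = 0;
}

static const struct fm10k_txq_ops fm10k_vec_txq_ops = {
	.reset = fm10k_txq_vec_reset_stub,
};

static void
fm10k_pick_burst_functions(struct rte_eth_dev *dev)
{
	struct fm10k_dev_info *info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	uint16_t i;

	/* RX: use the vector receive path only when the device-level
	 * conditions hold (assumed: the check returns 0 on success). */
	if (fm10k_rx_vec_condition_check(dev) == 0) {
		info->rx_vec_allowed = true;
		dev->rx_pkt_burst = dev->data->scattered_rx ?
			fm10k_recv_scattered_pkts_vec : fm10k_recv_pkts_vec;
	} else {
		info->rx_vec_allowed = false;
		dev->rx_pkt_burst = dev->data->scattered_rx ?
			fm10k_recv_scattered_pkts : fm10k_recv_pkts;
	}

	/* TX: default to the scalar path with mbuf checks ... */
	dev->tx_pkt_burst = fm10k_xmit_pkts;
	dev->tx_pkt_prepare = fm10k_prep_pkts;

	/* ... and switch to the vector path only if every queue satisfies
	 * the per-queue conditions (no offloads, no multi-seg, no FTAG). */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (fm10k_tx_vec_condition_check(dev->data->tx_queues[i]) != 0)
			return;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

		fm10k_txq_vec_setup(txq);
		txq->ops = &fm10k_vec_txq_ops;
	}

	/* A complete implementation would typically wrap
	 * fm10k_xmit_fixed_burst_vec() so that bursts larger than
	 * RTE_FM10K_VPMD_TX_BURST are split into chunks. */
	dev->tx_pkt_burst = fm10k_xmit_fixed_burst_vec;
	dev->tx_pkt_prepare = NULL;
}

The per-queue ops->reset hook lets the generic queue stop/start code reset whichever TX layout (scalar or vector) is currently in use without branching on the selected path.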