diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 0f5c3c181d..f3fd39f767 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -60,48 +60,44 @@ struct vnic_rq_ctrl {
 	u32 pad7;
 	u32 error_status;		/* 0x48 */
 	u32 pad8;
-	u32 dropped_packet_count;	/* 0x50 */
+	u32 tcp_sn;			/* 0x50 */
 	u32 pad9;
-	u32 dropped_packet_count_rc;	/* 0x58 */
+	u32 unused;			/* 0x58 */
 	u32 pad10;
-};
-
-/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
-#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
-#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
-#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
-	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
-	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
-#define VNIC_RQ_BUF_BLK_SZ(entries) \
-	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
-#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
-	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
-
-struct vnic_rq_buf {
-	struct vnic_rq_buf *next;
-	dma_addr_t dma_addr;
-	void *os_buf;
-	unsigned int os_buf_index;
-	unsigned int len;
-	unsigned int index;
-	void *desc;
-	uint64_t wr_id;
+	u32 dca_select;			/* 0x60 */
+	u32 pad11;
+	u32 dca_value;			/* 0x68 */
+	u32 pad12;
+	u32 data_ring;			/* 0x70 */
+	u32 pad13;
+	u32 header_split;		/* 0x78 */
+	u32 pad14;
 };
 
 struct vnic_rq {
 	unsigned int index;
+	unsigned int posted_index;
 	struct vnic_dev *vdev;
-	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
+	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
 	struct vnic_dev_ring ring;
-	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
-	struct vnic_rq_buf *to_use;
-	struct vnic_rq_buf *to_clean;
+	struct rte_mbuf **mbuf_ring;		/* array of allocated mbufs */
+	unsigned int mbuf_next_idx;		/* next mb to consume */
 	void *os_buf_head;
 	unsigned int pkts_outstanding;
-
+	uint16_t rx_nb_hold;
+	uint16_t rx_free_thresh;
 	unsigned int socket_id;
 	struct rte_mempool *mp;
+	uint16_t rxst_idx;
+	uint32_t tot_pkts;
+	uint16_t data_queue_idx;
+	uint8_t data_queue_enable;
+	uint8_t is_sop;
+	uint8_t in_use;
+	struct rte_mbuf *pkt_first_seg;
+	struct rte_mbuf *pkt_last_seg;
+	unsigned int max_mbufs_per_pkt;
+	uint16_t tot_nb_desc;
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -116,119 +112,13 @@ static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
 	return rq->ring.desc_count - rq->ring.desc_avail - 1;
 }
 
-static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
-{
-	return rq->to_use->desc;
-}
-
-static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
-{
-	return rq->to_use->index;
-}
-
-static inline void vnic_rq_post(struct vnic_rq *rq,
-	void *os_buf, unsigned int os_buf_index,
-	dma_addr_t dma_addr, unsigned int len,
-	uint64_t wrid)
-{
-	struct vnic_rq_buf *buf = rq->to_use;
-
-	buf->os_buf = os_buf;
-	buf->os_buf_index = os_buf_index;
-	buf->dma_addr = dma_addr;
-	buf->len = len;
-	buf->wr_id = wrid;
-
-	buf = buf->next;
-	rq->to_use = buf;
-	rq->ring.desc_avail--;
-
-	/* Move the posted_index every nth descriptor
-	 */
-
-#ifndef VNIC_RQ_RETURN_RATE
-#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
-#endif
-
-	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
-		/* Adding write memory barrier prevents compiler and/or CPU
-		 * reordering, thus avoiding descriptor posting before
-		 * descriptor is initialized. Otherwise, hardware can read
-		 * stale descriptor fields.
-		 */
-		wmb();
-		iowrite32(buf->index, &rq->ctrl->posted_index);
-	}
-}
-
-static inline void vnic_rq_post_commit(struct vnic_rq *rq,
-	void *os_buf, unsigned int os_buf_index,
-	dma_addr_t dma_addr, unsigned int len)
-{
-	struct vnic_rq_buf *buf = rq->to_use;
-
-	buf->os_buf = os_buf;
-	buf->os_buf_index = os_buf_index;
-	buf->dma_addr = dma_addr;
-	buf->len = len;
-
-	buf = buf->next;
-	rq->to_use = buf;
-	rq->ring.desc_avail--;
-
-	/* Move the posted_index every descriptor
-	 */
-
-	/* Adding write memory barrier prevents compiler and/or CPU
-	 * reordering, thus avoiding descriptor posting before
-	 * descriptor is initialized. Otherwise, hardware can read
-	 * stale descriptor fields.
-	 */
-	wmb();
-	iowrite32(buf->index, &rq->ctrl->posted_index);
-}
-static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
-{
-	rq->ring.desc_avail += count;
-}
 
 enum desc_return_options {
 	VNIC_RQ_RETURN_DESC,
 	VNIC_RQ_DEFER_RETURN_DESC,
 };
 
-static inline int vnic_rq_service(struct vnic_rq *rq,
-	struct cq_desc *cq_desc, u16 completed_index,
-	int desc_return, int (*buf_service)(struct vnic_rq *rq,
-	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-	int skipped, void *opaque), void *opaque)
-{
-	struct vnic_rq_buf *buf;
-	int skipped;
-	int eop = 0;
-
-	buf = rq->to_clean;
-	while (1) {
-
-		skipped = (buf->index != completed_index);
-
-		if ((*buf_service)(rq, cq_desc, buf, skipped, opaque))
-			eop++;
-
-		if (desc_return == VNIC_RQ_RETURN_DESC)
-			rq->ring.desc_avail++;
-
-		rq->to_clean = buf->next;
-
-		if (!skipped)
-			break;
-
-		buf = rq->to_clean;
-	}
-	return eop;
-}
-
 static inline int vnic_rq_fill(struct vnic_rq *rq,
 	int (*buf_fill)(struct vnic_rq *rq))
 {
@@ -274,8 +164,5 @@ unsigned int vnic_rq_error_status(struct vnic_rq *rq);
 void vnic_rq_enable(struct vnic_rq *rq);
 int vnic_rq_disable(struct vnic_rq *rq);
 void vnic_rq_clean(struct vnic_rq *rq,
-	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
-int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
-	unsigned int desc_size);
-
+	void (*buf_clean)(struct rte_mbuf **buf));
 #endif /* _VNIC_RQ_H_ */
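
Note (not part of the diff above): the reworked vnic_rq_clean() prototype now takes a per-slot callback over the RQ's mbuf_ring entries instead of the old (rq, vnic_rq_buf) pair. A minimal caller-side sketch follows; the names free_rq_mbuf and example_rq_release_mbufs are illustrative, not taken from the driver, and only rte_pktmbuf_free() plus the declarations in this header are assumed.

#include <rte_mbuf.h>

#include "vnic_rq.h"

/* Hypothetical cleanup callback matching the new buf_clean prototype:
 * it receives a pointer to one mbuf_ring slot, frees the mbuf held there
 * (if any) and clears the slot.
 */
static void free_rq_mbuf(struct rte_mbuf **mbuf)
{
	if (*mbuf != NULL) {
		rte_pktmbuf_free(*mbuf);
		*mbuf = NULL;
	}
}

/* Illustrative call site: release every buffer still posted to the RQ. */
static void example_rq_release_mbufs(struct vnic_rq *rq)
{
	vnic_rq_clean(rq, free_rq_mbuf);
}

Passing a pointer to the ring slot lets the callback clear the slot in place, so a later teardown or refill pass cannot double-free the same mbuf.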