/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

/* NOTE(review): the original include list was lost in extraction; this
 * header uses bool, uint*_t, struct vnic_dev/vnic_dev_ring and struct
 * rte_mbuf/rte_mempool, so it needs <stdbool.h>, <stdint.h> and the
 * driver/DPDK headers that declare those types — restore from upstream.
 */
14 /* Receive queue control */
16 u64 ring_base; /* 0x00 */
17 u32 ring_size; /* 0x08 */
19 u32 posted_index; /* 0x10 */
21 u32 cq_index; /* 0x18 */
23 u32 enable; /* 0x20 */
25 u32 running; /* 0x28 */
27 u32 fetch_index; /* 0x30 */
29 u32 error_interrupt_enable; /* 0x38 */
31 u32 error_interrupt_offset; /* 0x40 */
33 u32 error_status; /* 0x48 */
35 u32 tcp_sn; /* 0x50 */
37 u32 unused; /* 0x58 */
39 u32 dca_select; /* 0x60 */
41 u32 dca_value; /* 0x68 */
43 u32 data_ring; /* 0x70 */
45 u32 header_split; /* 0x78 */
51 unsigned int posted_index;
52 struct vnic_dev *vdev;
53 struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
54 struct vnic_dev_ring ring;
55 struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
57 struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
58 unsigned int mbuf_next_idx; /* next mb to consume */
60 unsigned int pkts_outstanding;
62 uint16_t rx_free_thresh;
63 unsigned int socket_id;
64 struct rte_mempool *mp;
67 uint16_t data_queue_idx;
68 uint8_t data_queue_enable;
71 struct rte_mbuf *pkt_first_seg;
72 struct rte_mbuf *pkt_last_seg;
73 unsigned int max_mbufs_per_pkt;
75 bool need_initial_post;
78 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
80 /* how many does SW own? */
81 return rq->ring.desc_avail;
84 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
86 /* how many does HW own? */
87 return rq->ring.desc_count - rq->ring.desc_avail - 1;
92 enum desc_return_options {
94 VNIC_RQ_DEFER_RETURN_DESC,
97 static inline int vnic_rq_fill(struct vnic_rq *rq,
98 int (*buf_fill)(struct vnic_rq *rq))
102 while (vnic_rq_desc_avail(rq) > 0) {
104 err = (*buf_fill)(rq);
112 static inline int vnic_rq_fill_count(struct vnic_rq *rq,
113 int (*buf_fill)(struct vnic_rq *rq), unsigned int count)
117 while ((vnic_rq_desc_avail(rq) > 0) && (count--)) {
119 err = (*buf_fill)(rq);
127 void vnic_rq_free(struct vnic_rq *rq);
128 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
129 unsigned int desc_count, unsigned int desc_size);
130 void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
131 unsigned int fetch_index, unsigned int posted_index,
132 unsigned int error_interrupt_enable,
133 unsigned int error_interrupt_offset);
134 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
135 unsigned int error_interrupt_enable,
136 unsigned int error_interrupt_offset);
137 void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
138 unsigned int vnic_rq_error_status(struct vnic_rq *rq);
139 void vnic_rq_enable(struct vnic_rq *rq);
140 int vnic_rq_disable(struct vnic_rq *rq);
141 void vnic_rq_clean(struct vnic_rq *rq,
142 void (*buf_clean)(struct rte_mbuf **buf));
143 #endif /* _VNIC_RQ_H_ */