/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ident "$Id: vnic_rq.h 180262 2014-07-02 07:57:43Z gvaradar $"

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
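
/*
 * Worked example of the block sizing above (illustrative only): a
 * 512-descriptor RQ uses 64-entry blocks, so VNIC_RQ_BUF_BLKS_NEEDED(512)
 * is DIV_ROUND_UP(512, 64) = 8 blocks of VNIC_RQ_BUF_BLK_SZ(512) =
 * 64 * sizeof(struct vnic_rq_buf) bytes each, while a small 16-descriptor
 * ring falls back to a single 32-entry block. The 4096-descriptor upper
 * bound therefore makes VNIC_RQ_BUF_BLKS_MAX equal to 64.
 */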

struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
	uint64_t wr_id;
};

struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;
	struct vnic_rq_buf *to_clean;
	void *os_buf_head;
	unsigned int pkts_outstanding;

	unsigned int socket_id;
	struct rte_mempool *mp;
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
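
/*
 * Note on the "- 1" above: one descriptor is deliberately kept unposted
 * so that a completely full ring can be told apart from an empty one;
 * the init path is assumed to start desc_avail at desc_count - 1
 * accordingly, so SW-owned plus HW-owned always sums to desc_count - 1.
 */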

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len,
	uint64_t wrid)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}
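
/*
 * Usage sketch for vnic_rq_post() (illustrative only; the mbuf calls
 * assume a recent DPDK API and are not part of this header):
 *
 *	struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rq->mp);
 *
 *	if (mbuf != NULL) {
 *		void *desc = vnic_rq_next_desc(rq);
 *		(fill the descriptor at 'desc' with the buffer address)
 *		vnic_rq_post(rq, mbuf, 0, rte_pktmbuf_iova(mbuf),
 *			rte_pktmbuf_tailroom(mbuf), 0);
 *	}
 */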

static inline void vnic_rq_post_commit(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every descriptor
	 */

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(buf->index, &rq->ctrl->posted_index);
}

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}

enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

static inline int vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, int (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;
	int eop = 0;

	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		if ((*buf_service)(rq, cq_desc, buf, skipped, opaque))
			eop = 1;

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}

	return eop;
}
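
/*
 * Sketch of a buf_service callback for vnic_rq_service() (illustrative
 * only; the function name and the rte_mbuf interpretation of os_buf are
 * assumptions, not part of this header):
 *
 *	static int example_rq_buf_service(struct vnic_rq *rq,
 *		struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
 *		int skipped, void *opaque)
 *	{
 *		if (skipped)
 *			return 0;	(completion is for a later buffer)
 *		(hand buf->os_buf, e.g. an rte_mbuf, up the stack here)
 *		return 1;		(end of packet reached)
 *	}
 */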

static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}

static inline int vnic_rq_fill_count(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq), unsigned int count)
{
	int err;

	while ((vnic_rq_desc_avail(rq) > 0) && (count--)) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
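
/*
 * Sketch of a buf_fill callback for vnic_rq_fill()/vnic_rq_fill_count()
 * (illustrative only; the mempool/mbuf use is an assumption):
 *
 *	static int example_rq_buf_fill(struct vnic_rq *rq)
 *	{
 *		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rq->mp);
 *
 *		if (mbuf == NULL)
 *			return -ENOMEM;
 *		(write the buffer address into vnic_rq_next_desc(rq),
 *		 then vnic_rq_post() it as sketched above)
 *		return 0;
 *	}
 */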

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
	unsigned int desc_size);
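
/*
 * Typical queue lifecycle, pieced together from the prototypes above
 * (a sketch, not a guaranteed sequence; error handling is omitted and
 * example_rq_buf_fill/example_rq_buf_clean are the hypothetical
 * callbacks sketched earlier):
 *
 *	vnic_rq_alloc(vdev, &rq, index, desc_count, desc_size);
 *	vnic_rq_init(&rq, cq_index, error_interrupt_enable,
 *		error_interrupt_offset);
 *	vnic_rq_fill(&rq, example_rq_buf_fill);
 *	vnic_rq_enable(&rq);
 *	(receive traffic; service completions via vnic_rq_service())
 *	vnic_rq_disable(&rq);
 *	vnic_rq_clean(&rq, example_rq_buf_clean);
 *	vnic_rq_free(&rq);
 */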

#endif /* _VNIC_RQ_H_ */