/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include "vnic_dev.h"
#include "vnic_cq.h"
/* Receive queue control */
/* Each 32-bit register occupies its own 64-bit slot; the pad words
 * preserve the offsets noted on the right.
 */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};
/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
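
/*
 * Worked example (illustrative): for a ring of 512 descriptors,
 * VNIC_RQ_BUF_BLK_ENTRIES(512) is 64, so VNIC_RQ_BUF_BLKS_NEEDED(512)
 * is DIV_ROUND_UP(512, 64) = 8 blocks, each of VNIC_RQ_BUF_BLK_SZ(512) =
 * 64 * sizeof(struct vnic_rq_buf) bytes. Rings smaller than 64 entries
 * fall back to 32-entry blocks so tiny rings do not over-allocate.
 */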
struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
	uint64_t wr_id;
};
struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;
	struct vnic_rq_buf *to_clean;
	void *os_buf_head;
	unsigned int pkts_outstanding;

	unsigned int socket_id;
	struct rte_mempool *mp;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
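
/*
 * Note: one descriptor is deliberately left out of the count (the "- 1"
 * above) so the ring never appears completely full and a full ring can
 * be distinguished from an empty one. For example, with desc_count = 16
 * and desc_avail = 5, hardware owns 10 descriptors, not 11.
 */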
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}
static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len,
	uint64_t wrid)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}
static inline void vnic_rq_post_commit(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every descriptor
	 */

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(buf->index, &rq->ctrl->posted_index);
}
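
/*
 * Difference between the two posting paths (informational): vnic_rq_post()
 * rings the posted_index doorbell only every (VNIC_RQ_RETURN_RATE + 1) = 16
 * descriptors to cut down on MMIO writes, while vnic_rq_post_commit() rings
 * it on every descriptor, trading extra MMIO traffic for immediate
 * visibility to the hardware.
 */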
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}
enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};
static inline int vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, int (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	/* Walk from to_clean up to the completed descriptor, handing
	 * each buffer (including skipped ones) to the callback.
	 */
	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		if ((*buf_service)(rq, cq_desc, buf, skipped, opaque))
			return -1;

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}

	return 0;
}
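
/*
 * Illustrative buf_service callback (a sketch; deliver_packet() and ctx
 * are hypothetical, not part of this header): hand the completed buffer
 * to the application and let vnic_rq_service() return the descriptor to
 * the ring.
 *
 *	static int example_rq_indicate_buf(struct vnic_rq *rq,
 *		struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
 *		int skipped, void *opaque)
 *	{
 *		if (!skipped)
 *			deliver_packet(opaque, buf->os_buf, buf->len);
 *		return 0;	// nonzero aborts the service loop
 *	}
 *
 * called as:
 *	vnic_rq_service(rq, cq_desc, completed_index,
 *		VNIC_RQ_RETURN_DESC, example_rq_indicate_buf, ctx);
 */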
static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
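
/*
 * Illustrative buf_fill callback (a sketch; enic_dma_addr() is a
 * hypothetical helper that derives the bus address of the mbuf data
 * area for the running DPDK version):
 *
 *	static int example_rq_alloc_buf(struct vnic_rq *rq)
 *	{
 *		struct rte_mbuf *m = rte_pktmbuf_alloc(rq->mp);
 *
 *		if (m == NULL)
 *			return -ENOMEM;
 *		vnic_rq_post(rq, m, 0, enic_dma_addr(m),
 *			m->buf_len - RTE_PKTMBUF_HEADROOM, 0);
 *		return 0;
 *	}
 *
 * vnic_rq_fill(rq, example_rq_alloc_buf) then posts buffers until the
 * software-owned portion of the ring is exhausted.
 */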
static inline int vnic_rq_fill_count(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq), unsigned int count)
{
	int err;

	while ((vnic_rq_desc_avail(rq) > 0) && (count--)) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
	unsigned int desc_size);
#endif /* _VNIC_RQ_H_ */