/* drivers/net/enic/base/vnic_rq.c */
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */
#ident "$Id: vnic_rq.c 171146 2014-05-02 07:08:20Z ssujith $"

#include "vnic_dev.h"
#include "vnic_rq.h"

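/*
 * Allocate the RQ buffer descriptor blocks and chain them into a ring:
 * each vnic_rq_buf points at its hardware descriptor and at the next
 * buffer, with the last entry wrapping back to the first.
 */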
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
        struct vnic_rq_buf *buf;
        unsigned int i, j, count = rq->ring.desc_count;
        unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);

        for (i = 0; i < blks; i++) {
                rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
                if (!rq->bufs[i])
                        return -ENOMEM;
        }

        for (i = 0; i < blks; i++) {
                buf = rq->bufs[i];
                for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
                        buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
                        buf->desc = (u8 *)rq->ring.descs +
                                rq->ring.desc_size * buf->index;
                        if (buf->index + 1 == count) {
                                buf->next = rq->bufs[0];
                                break;
                        } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
                                buf->next = rq->bufs[i + 1];
                        } else {
                                buf->next = buf + 1;
                                buf++;
                        }
                }
        }

        rq->to_use = rq->to_clean = rq->bufs[0];

        return 0;
}

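/*
 * Report the total memory footprint of the RQ: the descriptor ring
 * plus the buffer blocks needed to track desc_count descriptors.
 */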
int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
        unsigned int desc_size)
{
        int mem_size = 0;

        mem_size += vnic_dev_desc_ring_size(&rq->ring, desc_count, desc_size);

        mem_size += VNIC_RQ_BUF_BLKS_NEEDED(rq->ring.desc_count) *
                VNIC_RQ_BUF_BLK_SZ(rq->ring.desc_count);

        return mem_size;
}

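/* Release the descriptor ring and every allocated buffer block. */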
void vnic_rq_free(struct vnic_rq *rq)
{
        struct vnic_dev *vdev;
        unsigned int i;

        vdev = rq->vdev;

        vnic_dev_free_desc_ring(vdev, &rq->ring);

        for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
                if (rq->bufs[i]) {
                        kfree(rq->bufs[i]);
                        rq->bufs[i] = NULL;
                }
        }

        rq->ctrl = NULL;
}

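/*
 * Bind the RQ to its control registers, quiesce it, then allocate the
 * descriptor ring and the buffer bookkeeping structures.
 */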
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size)
{
        int err;
        char res_name[NAME_MAX];
        static int instance;

        rq->index = index;
        rq->vdev = vdev;

        rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
        if (!rq->ctrl) {
                pr_err("Failed to hook RQ[%d] resource\n", index);
                return -EINVAL;
        }

        vnic_rq_disable(rq);

        snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index);
        err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
                rq->socket_id, res_name);
        if (err)
                return err;

        err = vnic_rq_alloc_bufs(rq);
        if (err) {
                vnic_rq_free(rq);
                return err;
        }

        return 0;
}

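/*
 * Program the RQ control registers (ring base/size, completion queue,
 * error interrupt settings) and position the software ring pointers at
 * the given fetch index.
 */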
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int fetch_index, unsigned int posted_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
{
        u64 paddr;
        unsigned int count = rq->ring.desc_count;

        paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
        writeq(paddr, &rq->ctrl->ring_base);
        iowrite32(count, &rq->ctrl->ring_size);
        iowrite32(cq_index, &rq->ctrl->cq_index);
        iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
        iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
        iowrite32(0, &rq->ctrl->dropped_packet_count);
        iowrite32(0, &rq->ctrl->error_status);
        iowrite32(fetch_index, &rq->ctrl->fetch_index);
        iowrite32(posted_index, &rq->ctrl->posted_index);

        rq->to_use = rq->to_clean =
                &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
                        [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
}

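/*
 * Initialize the RQ, starting the ring at the hardware's current fetch
 * index (or at 0 if the device has been surprise-removed).
 */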
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
{
        u32 fetch_index = 0;

        /* Use current fetch_index as the ring starting point */
        fetch_index = ioread32(&rq->ctrl->fetch_index);

        if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
                /* Hardware surprise removal: reset fetch_index */
                fetch_index = 0;
        }

        vnic_rq_init_start(rq, cq_index,
                fetch_index, fetch_index,
                error_interrupt_enable,
                error_interrupt_offset);
}

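/* Simple accessors for the RQ error and enable control registers. */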
void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
{
        iowrite32(error, &rq->ctrl->error_status);
}

unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
        return ioread32(&rq->ctrl->error_status);
}

void vnic_rq_enable(struct vnic_rq *rq)
{
        iowrite32(1, &rq->ctrl->enable);
}

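/*
 * Request that hardware stop the RQ and poll until the running bit
 * clears; give up after roughly 10 ms.
 */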
int vnic_rq_disable(struct vnic_rq *rq)
{
        unsigned int wait;

        iowrite32(0, &rq->ctrl->enable);

        /* Wait for HW to ACK disable request */
        for (wait = 0; wait < 1000; wait++) {
                if (!(ioread32(&rq->ctrl->running)))
                        return 0;
                udelay(10);
        }

        pr_err("Failed to disable RQ[%d]\n", rq->index);

        return -ETIMEDOUT;
}

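/*
 * Return every in-flight buffer to the caller via buf_clean(), then
 * resynchronize the software ring pointers and posted index with the
 * hardware fetch index and clear the descriptor ring.
 */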
void vnic_rq_clean(struct vnic_rq *rq,
        void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
        struct vnic_rq_buf *buf;
        u32 fetch_index;
        unsigned int count = rq->ring.desc_count;

        buf = rq->to_clean;

        while (vnic_rq_desc_used(rq) > 0) {

                (*buf_clean)(rq, buf);

                buf = rq->to_clean = buf->next;
                rq->ring.desc_avail++;
        }

        /* Use current fetch_index as the ring starting point */
        fetch_index = ioread32(&rq->ctrl->fetch_index);

        if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
                /* Hardware surprise removal: reset fetch_index */
                fetch_index = 0;
        }
        rq->to_use = rq->to_clean =
                &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
                        [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
        iowrite32(fetch_index, &rq->ctrl->posted_index);

        vnic_dev_clear_desc_ring(&rq->ring);
}