enic: remove #ident lines
[dpdk.git] drivers/net/enic/base/vnic_rq.c
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "vnic_dev.h"
#include "vnic_rq.h"

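/*
 * Allocate the buffer descriptor blocks for the RQ, point each
 * vnic_rq_buf at its hardware descriptor, and chain the entries into a
 * circular list that wraps from the last descriptor back to the first.
 */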
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
	struct vnic_rq_buf *buf;
	unsigned int i, j, count = rq->ring.desc_count;
	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);

	for (i = 0; i < blks; i++) {
		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
		if (!rq->bufs[i])
			return -ENOMEM;
	}

	for (i = 0; i < blks; i++) {
		buf = rq->bufs[i];
		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
			buf->desc = (u8 *)rq->ring.descs +
				rq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = rq->bufs[0];
				break;
			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
				buf->next = rq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	rq->to_use = rq->to_clean = rq->bufs[0];

	return 0;
}

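/*
 * Return the total memory needed for the descriptor ring plus the
 * buffer descriptor blocks for the requested ring geometry.
 */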
int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
	unsigned int desc_size)
{
	int mem_size = 0;

	mem_size += vnic_dev_desc_ring_size(&rq->ring, desc_count, desc_size);

	mem_size += VNIC_RQ_BUF_BLKS_NEEDED(rq->ring.desc_count) *
		VNIC_RQ_BUF_BLK_SZ(rq->ring.desc_count);

	return mem_size;
}

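/*
 * Free the descriptor ring and every buffer descriptor block, then
 * detach the RQ from its control resource.
 */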
void vnic_rq_free(struct vnic_rq *rq)
{
	struct vnic_dev *vdev;
	unsigned int i;

	vdev = rq->vdev;

	vnic_dev_free_desc_ring(vdev, &rq->ring);

	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
		if (rq->bufs[i]) {
			kfree(rq->bufs[i]);
			rq->bufs[i] = NULL;
		}
	}

	rq->ctrl = NULL;
}

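/*
 * Bind the RQ to its hardware control resource, make sure it is
 * disabled, then allocate the descriptor ring and the buffer
 * descriptor blocks.
 */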
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;
	char res_name[NAME_MAX];
	static int instance;

	rq->index = index;
	rq->vdev = vdev;

	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
	if (!rq->ctrl) {
		pr_err("Failed to hook RQ[%d] resource\n", index);
		return -EINVAL;
	}

	vnic_rq_disable(rq);

	snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index);
	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
		rq->socket_id, res_name);
	if (err)
		return err;

	err = vnic_rq_alloc_bufs(rq);
	if (err) {
		vnic_rq_free(rq);
		return err;
	}

	return 0;
}

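/*
 * Program the RQ control registers (ring base and size, completion
 * queue, error interrupt settings, fetch and posted indexes) and align
 * the software to_use/to_clean pointers with fetch_index.
 */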
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = rq->ring.desc_count;

	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);
	iowrite32(fetch_index, &rq->ctrl->fetch_index);
	iowrite32(posted_index, &rq->ctrl->posted_index);

	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
}

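/*
 * Initialize the RQ starting from the fetch_index currently reported by
 * hardware; an all-ones read means the device is gone, so fall back to
 * index 0.
 */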
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u32 fetch_index = 0;
	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);

	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
		/* Hardware surprise removal: reset fetch_index */
		fetch_index = 0;
	}

	vnic_rq_init_start(rq, cq_index,
		fetch_index, fetch_index,
		error_interrupt_enable,
		error_interrupt_offset);
}

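/* Write an error code into the RQ error status register. */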
void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
{
	iowrite32(error, &rq->ctrl->error_status);
}

unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
	return ioread32(&rq->ctrl->error_status);
}

void vnic_rq_enable(struct vnic_rq *rq)
{
	iowrite32(1, &rq->ctrl->enable);
}

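/*
 * Clear the enable bit and poll the running bit until hardware
 * acknowledges the disable, giving up after roughly 10 ms.
 */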
int vnic_rq_disable(struct vnic_rq *rq)
{
	unsigned int wait;

	iowrite32(0, &rq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 1000; wait++) {
		if (!(ioread32(&rq->ctrl->running)))
			return 0;
		udelay(10);
	}

	pr_err("Failed to disable RQ[%d]\n", rq->index);

	return -ETIMEDOUT;
}

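/*
 * Run the caller-supplied cleanup callback on every outstanding buffer,
 * resynchronize the software pointers and posted_index with the
 * hardware fetch_index, and clear the descriptor ring.
 */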
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;
	unsigned int count = rq->ring.desc_count;

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {

		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);

	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
		/* Hardware surprise removal: reset fetch_index */
		fetch_index = 0;
	}
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	vnic_dev_clear_desc_ring(&rq->ring);
}