net/sfc: validate Rx queue buffers setup
drivers/net/sfc/sfc_rx.c
/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_malloc.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa,
                   const struct rte_eth_rxconf *rx_conf)
{
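        /*
         * Note: in line with the rest of the sfc driver, errors are
         * reported as positive errno values here; the assumption (not
         * visible in this file) is that the ethdev entry points negate
         * them before returning to the application.
         */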
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_free_thresh != 0) {
                sfc_err(sa, "RxQ free threshold is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_deferred_start != 0) {
                sfc_err(sa, "RxQ deferred start is not supported");
                rc = EINVAL;
        }

        return rc;
}

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        /* rte_bsf32() is zero-based, so the alignment is 1u << order */
        return 1u << order;
}
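
/*
 * Worked example for sfc_rx_mbuf_data_alignment() above (illustrative;
 * assumes a 64 byte cache line, a 128 byte struct rte_mbuf, no mempool
 * private area and the default 128 byte RTE_PKTMBUF_HEADROOM):
 * data_off = 128 + 0 + 128 = 256. rte_bsf32() returns the zero-based
 * index of the lowest set bit, so rte_bsf32(64) = 6, rte_bsf32(256) = 8,
 * order = MIN(6, 8) = 6 and the guaranteed data alignment is
 * 1u << 6 = 64 bytes.
 */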

static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are powers of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * The mbuf is always cache line aligned; double-check
         * that it meets the Rx buffer start alignment requirements.
         */

        /* Start from mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate the space which may be lost. If the guaranteed
                 * buffer size is odd, the lost space may be up to
                 * (nic_align_end - 1). A more accurate formula is below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << rte_bsf32(buf_size));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * Start is aligned the same as or better than end,
                 * just align the length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}
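
/*
 * Worked example for sfc_rx_mb_pool_buf_size() above (illustrative;
 * hypothetical NIC requirements nic_align_start = 64 and
 * nic_align_end = 128, a 2048 byte data room, a 128 byte headroom and
 * a guaranteed 64 byte data alignment):
 * buf_size = 2048 - 128 = 1920. Start alignment is already satisfied
 * (64 >= 64), so nothing is reserved for it. The end alignment is
 * stricter than the guaranteed one (64 < 128), so
 * nic_align_end - MIN(64, 1u << rte_bsf32(1920)) = 128 - 64 = 64 bytes
 * are reserved for end padding, leaving buf_size = 1856 bytes.
 */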

int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        unsigned int evq_index;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;

        rc = sfc_rx_qcheck_conf(sa, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

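        /*
         * With Rx scatter disabled, an entire packet plus the NIC Rx
         * prefix must fit into a single Rx buffer, so reject the
         * configuration if the calculated buffer size is too small.
         */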
        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
        rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

        evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

        rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
        if (rc != 0)
                goto fail_ev_qinit;

        evq = sa->evq_info[evq_index].evq;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        evq->rxq = rxq;
        rxq->evq = evq;
        rxq->ptr_mask = rxq_info->entries - 1;
        rxq->refill_mb_pool = mb_pool;
        rxq->buf_size = buf_size;
        rxq->hw_index = sw_index;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->rxq = rxq;

        return 0;

fail_desc_alloc:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}
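
/*
 * Expected call flow (a sketch under the assumption that the ethdev
 * callbacks live in a separate file and negate the positive errno
 * convention used here; not code from this file):
 *
 *	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
 *			  rx_conf, mb_pool);
 *	if (rc != 0)
 *		return -rc;
 *	...
 *	sfc_rx_qfini(sa, rx_queue_id);
 */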

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        rte_free(rxq->sw_desc);
        sfc_dma_free(sa, &rxq->mem);
        rte_free(rxq);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}
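
/*
 * The power of 2 assertion above is important: sfc_rx_qinit() derives
 * rxq->ptr_mask as entries - 1, which only works as a ring index mask
 * when the ring size is a power of 2.
 */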

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_extend) {
                sfc_err(sa,
                        "Q-in-Q HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (!rxmode->hw_strip_crc) {
                sfc_warn(sa,
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;
        }

        if (rxmode->enable_scatter) {
                sfc_err(sa, "Scatter on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");
                rc = EINVAL;
        }

        return rc;
}
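
/*
 * A minimal Rx mode configuration that passes sfc_rx_check_mode()
 * above (a sketch; field names as used by the checks in this file):
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_NONE,
 *			.hw_strip_crc = 1,
 *		},
 *	};
 */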

/**
 * Initialize Rx subsystem.
 *
 * Called at the device configuration stage when the number of receive
 * queues is specified together with other device-level receive
 * configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;
        int rc;

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

        rc = ENOMEM;
        sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
                                         sizeof(struct sfc_rxq_info), 0,
                                         sa->socket_id);
        if (sa->rxq_info == NULL)
                goto fail_rxqs_alloc;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qinit_info;
        }

        return 0;

fail_rx_qinit_info:
        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;

fail_rxqs_alloc:
        sa->rxq_count = 0;
fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

/**
 * Shut down Rx subsystem.
 *
 * Called at the device close stage, for example before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
        sa->rxq_count = 0;
}