net/sfc: implement Rx queue start and stop operations
[dpdk.git] drivers/net/sfc/sfc_rx.c
/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS          (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines the Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)

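/*
 * Rx queue flush completion helpers: sfc_rx_qflush() polls the event queue
 * until one of them clears the SFC_RXQ_FLUSHING flag.
 */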
void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

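/*
 * Refill the Rx queue with mbufs from the refill mempool: buffers are
 * allocated in bulks of SFC_RX_REFILL_BULK, their physical addresses are
 * posted to the hardware ring and the doorbell is pushed if anything has
 * been added.
 */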
static void
sfc_rx_qrefill(struct sfc_rxq *rxq)
{
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        unsigned int id;
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        uint8_t port_id = rxq->port_id;

        free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
                (added - rxq->completed);
        bulks = free_space / RTE_DIM(objs);

        id = added & rxq->ptr_mask;
        while (bulks-- > 0) {
                if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                         RTE_DIM(objs)) < 0) {
                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                                RTE_DIM(objs);
                        break;
                }

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        m = objs[i];

                        rxd = &rxq->sw_desc[id];
                        rxd->mbuf = m;

                        rte_mbuf_refcnt_set(m, 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        m->next = NULL;
                        m->nb_segs = 1;
                        m->port = port_id;

                        addr[i] = rte_pktmbuf_mtophys(m);
                }

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        }

        /* Push the doorbell if something has been posted */
        if (rxq->added != added) {
                rxq->added = added;
                efx_rx_qpush(rxq->common, added, &rxq->pushed);
        }
}

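/*
 * Return all mbufs still owned by the Rx queue (from completed up to added)
 * back to the refill mempool.
 */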
static void
sfc_rx_qpurge(struct sfc_rxq *rxq)
{
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
                rxd->mbuf = NULL;
        }
}

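/*
 * Flush the Rx queue: request a hardware flush and poll the event queue for
 * the flush done or failed event, retrying up to SFC_RX_QFLUSH_ATTEMPTS
 * times. Any buffers still held by the queue are purged afterwards.
 */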
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;
        unsigned int retry_count;
        unsigned int wait_count;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /*
         * Retry Rx queue flushing in the case of flush failure or
         * timeout. In the worst case it can delay for 6 seconds.
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_rx_qflush(rxq->common) != 0) {
                        rxq->state |= SFC_RXQ_FLUSH_FAILED;
                        break;
                }
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for the Rx queue flush done or failed event for at
                 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_info(sa, "RxQ %u flushed", sw_index);
        }

        sfc_rx_qpurge(rxq);
}

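/*
 * Start the Rx queue: start its event queue, create and enable the hardware
 * Rx queue, refill it with mbufs and, for queue 0, install it as the default
 * MAC filter Rx queue.
 */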
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        evq = rxq->evq;

        rc = sfc_ev_qstart(sa, evq->evq_index);
        if (rc != 0)
                goto fail_ev_qstart;

        rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                            &rxq->mem, rxq_info->entries,
                            0 /* not used on EF10 */, evq->common,
                            &rxq->common);
        if (rc != 0)
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        rxq->state |= SFC_RXQ_STARTED;

        sfc_rx_qrefill(rxq);

        if (sw_index == 0) {
                rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common,
                                                    B_FALSE);
                if (rc != 0)
                        goto fail_mac_filter_default_rxq_set;
        }

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_mac_filter_default_rxq_set:
        sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
        sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
        return rc;
}

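/*
 * Stop the Rx queue: the reverse of sfc_rx_qstart(). The default MAC filter
 * Rx queue is cleared for queue 0, the queue is flushed and purged, the
 * hardware queue is destroyed and the event queue is stopped.
 */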
void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        if (sw_index == 0)
                efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(sa, rxq->evq->evq_index);
}

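/*
 * Check the per-queue Rx configuration provided by the application against
 * the driver capabilities. Returns 0 on success or a positive errno value.
 */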
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa,
                   const struct rte_eth_rxconf *rx_conf)
{
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_free_thresh != 0) {
                sfc_err(sa, "RxQ free threshold is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_deferred_start != 0) {
                sfc_err(sa, "RxQ deferred start is not supported");
                rc = EINVAL;
        }

        return rc;
}

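/*
 * Estimate the data start alignment guaranteed for mbufs from the pool,
 * based on the mbuf object being cache line aligned and the data area
 * starting at a fixed offset from the object start.
 */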
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << (order - 1);
}

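/*
 * Calculate the Rx buffer size which can be used with the given mbuf pool,
 * taking the headroom and the NIC buffer start/end alignment requirements
 * into account. Returns 0 if the pool objects are too small.
 */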
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are powers of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * The mbuf is always cache line aligned; double-check that it
         * meets the Rx buffer start alignment requirements.
         */

        /* Start from mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate the space which can be lost. If the guaranteed
                 * buffer size is odd, the lost space is (nic_align_end - 1).
                 * A more accurate formula is used below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * The start is aligned the same as or better than the end,
                 * so just align the length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}

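/*
 * Set up an Rx queue at device queue setup time: validate the configuration,
 * calculate the Rx buffer size, initialize the bound event queue and allocate
 * the queue control structure, descriptor ring DMA memory and software
 * descriptor array on the requested NUMA socket.
 */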
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        unsigned int evq_index;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;

        rc = sfc_rx_qcheck_conf(sa, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
        rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

        evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

        rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
        if (rc != 0)
                goto fail_ev_qinit;

        evq = sa->evq_info[evq_index].evq;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        evq->rxq = rxq;
        rxq->evq = evq;
        rxq->ptr_mask = rxq_info->entries - 1;
        rxq->refill_mb_pool = mb_pool;
        rxq->buf_size = buf_size;
        rxq->hw_index = sw_index;
        rxq->port_id = sa->eth_dev->data->port_id;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->rxq = rxq;

        return 0;

fail_desc_alloc:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

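/*
 * Release the resources allocated for the Rx queue by sfc_rx_qinit().
 */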
void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        rte_free(rxq->sw_desc);
        sfc_dma_free(sa, &rxq->mem);
        rte_free(rxq);
}

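/*
 * Start the Rx subsystem at device start: initialize the Rx module in the
 * hardware and start all configured Rx queues, rolling back already started
 * queues on failure.
 */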
int
sfc_rx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);
        if (rc != 0)
                goto fail_rx_init;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qstart(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qstart;
        }

        return 0;

fail_rx_qstart:
        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

        efx_rx_fini(sa->nic);

fail_rx_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

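/*
 * Stop all Rx queues in reverse order and finalize the Rx module in the
 * hardware.
 */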
void
sfc_rx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);
        }

        efx_rx_fini(sa->nic);
}

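/*
 * Initialize the per-queue info entry at the device configuration stage
 * (currently just the maximum ring size).
 */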
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}

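/*
 * Check the device-level Rx mode requested by the application against the
 * driver capabilities. Returns 0 on success or a positive errno value.
 */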
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_extend) {
                sfc_err(sa,
                        "Q-in-Q HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (!rxmode->hw_strip_crc) {
                sfc_warn(sa,
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;
        }

        if (rxmode->enable_scatter) {
                sfc_err(sa, "Scatter on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");
                rc = EINVAL;
        }

        return rc;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at the device configuration stage when the number of receive
 * queues is specified together with other device-level receive
 * configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;
        int rc;

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

        rc = ENOMEM;
        sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
                                         sizeof(struct sfc_rxq_info), 0,
                                         sa->socket_id);
        if (sa->rxq_info == NULL)
                goto fail_rxqs_alloc;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qinit_info;
        }

        return 0;

fail_rx_qinit_info:
        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;

fail_rxqs_alloc:
        sa->rxq_count = 0;
fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
        sa->rxq_count = 0;
}