net/sfc: implement device callback to Rx burst of packets
[dpdk.git] drivers/net/sfc/sfc_rx.c
/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of flush failure or
 * timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS          (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. Together with SFC_RX_QFLUSH_POLL_WAIT_MS it
 * defines the timeout of a single Rx queue flush attempt.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)
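
/*
 * Illustrative worst case (see sfc_rx_qflush() below): each flush attempt
 * may poll for SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS =
 * 2000 * 1 ms = 2 s, and SFC_RX_QFLUSH_ATTEMPTS retries give up to
 * 3 * 2 s = 6 s before the queue is purged regardless of the outcome.
 */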

void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

static void
sfc_rx_qrefill(struct sfc_rxq *rxq)
{
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        unsigned int id;
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        uint8_t port_id = rxq->port_id;

        free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
                (added - rxq->completed);
        bulks = free_space / RTE_DIM(objs);

        id = added & rxq->ptr_mask;
        while (bulks-- > 0) {
                if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                         RTE_DIM(objs)) < 0) {
                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                                RTE_DIM(objs);
                        break;
                }

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        m = objs[i];

                        rxd = &rxq->sw_desc[id];
                        rxd->mbuf = m;

                        rte_mbuf_refcnt_set(m, 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        m->next = NULL;
                        m->nb_segs = 1;
                        m->port = port_id;

                        addr[i] = rte_pktmbuf_mtophys(m);
                }

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        }

        /* Push doorbell if something is posted */
        if (rxq->added != added) {
                rxq->added = added;
                efx_rx_qpush(rxq->common, added, &rxq->pushed);
        }
}
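
/*
 * Note (summary of the refill logic above): at most EFX_RXQ_LIMIT(ring size)
 * descriptors are kept outstanding (added - completed), buffers are posted
 * in fixed bulks of SFC_RX_REFILL_BULK physical addresses via efx_rx_qpost(),
 * and the doorbell (efx_rx_qpush()) is rung at most once per refill call.
 */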

uint16_t
sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_rxq *rxq = rx_queue;
        unsigned int completed;
        unsigned int prefix_size = rxq->prefix_size;
        unsigned int done_pkts = 0;

        if (unlikely((rxq->state & SFC_RXQ_RUNNING) == 0))
                return 0;

        sfc_ev_qpoll(rxq->evq);

        completed = rxq->completed;
        while (completed != rxq->pending && done_pkts < nb_pkts) {
                unsigned int id;
                struct sfc_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int seg_len;
                unsigned int desc_flags;

                id = completed++ & rxq->ptr_mask;
                rxd = &rxq->sw_desc[id];
                m = rxd->mbuf;
                desc_flags = rxd->flags;

                if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
                        goto discard;

                if (desc_flags & EFX_PKT_PREFIX_LEN) {
                        uint16_t tmp_size;
                        int rc __rte_unused;

                        rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
                                rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
                        SFC_ASSERT(rc == 0);
                        seg_len = tmp_size;
                } else {
                        seg_len = rxd->size - prefix_size;
                }

                m->data_off += prefix_size;
                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                m->packet_type = RTE_PTYPE_L2_ETHER;

                *rx_pkts++ = m;
                done_pkts++;
                continue;

discard:
                rte_mempool_put(rxq->refill_mb_pool, m);
                rxd->mbuf = NULL;
        }

        rxq->completed = completed;

        sfc_rx_qrefill(rxq);

        return done_pkts;
}
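
/*
 * Usage sketch (illustrative, not part of the driver): applications do not
 * call sfc_recv_pkts() directly; it is installed as the ethdev Rx burst
 * callback and reached via rte_eth_rx_burst(), e.g.:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts,
 *					  RTE_DIM(pkts));
 *	uint16_t i;
 *
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */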

static void
sfc_rx_qpurge(struct sfc_rxq *rxq)
{
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
                rxd->mbuf = NULL;
        }
}

static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;
        unsigned int retry_count;
        unsigned int wait_count;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /*
         * Retry Rx queue flushing in the case of flush failure or
         * timeout. In the worst case it can delay for 6 seconds.
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_rx_qflush(rxq->common) != 0) {
                        rxq->state |= SFC_RXQ_FLUSH_FAILED;
                        break;
                }
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for an Rx queue flush done or failed event for at
                 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not
                 * more than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS
                 * multiplied by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_info(sa, "RxQ %u flushed", sw_index);
        }

        sfc_rx_qpurge(rxq);
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        evq = rxq->evq;

        rc = sfc_ev_qstart(sa, evq->evq_index);
        if (rc != 0)
                goto fail_ev_qstart;

        rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                            &rxq->mem, rxq_info->entries,
                            0 /* not used on EF10 */, evq->common,
                            &rxq->common);
        if (rc != 0)
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        rxq->state |= (SFC_RXQ_STARTED | SFC_RXQ_RUNNING);

        sfc_rx_qrefill(rxq);

        if (sw_index == 0) {
                rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common,
                                                    B_FALSE);
                if (rc != 0)
                        goto fail_mac_filter_default_rxq_set;
        }

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_mac_filter_default_rxq_set:
        sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
        sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
        return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        rxq->state &= ~SFC_RXQ_RUNNING;

        if (sw_index == 0)
                efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(sa, rxq->evq->evq_index);
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa,
                   const struct rte_eth_rxconf *rx_conf)
{
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_free_thresh != 0) {
                sfc_err(sa, "RxQ free threshold is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_deferred_start != 0) {
                sfc_err(sa, "RxQ deferred start is not supported");
                rc = EINVAL;
        }

        return rc;
}

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << (order - 1);
}

static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are powers of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * The mbuf is always cache line aligned; double-check
         * that it meets the Rx buffer start alignment requirements.
         */

        /* Start from mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate the space which can be lost. If the guaranteed
                 * buffer size is odd, the lost space is (nic_align_end - 1).
                 * A more accurate formula is used below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * Start is aligned the same as or better than the end;
                 * just align the length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}
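
/*
 * Worked example (illustrative; the NIC alignment values are assumptions,
 * not taken from a real datasheet): with a mempool data room of 2176 bytes
 * (2048 + 128 byte headroom), nic_align_start of 1 and nic_align_end of 64,
 * removing the headroom leaves 2048 bytes and no start-alignment reservation
 * is needed; if the guaranteed data alignment already satisfies nic_align_end
 * the length is simply truncated to a multiple of 64, i.e. 2048 is returned,
 * otherwise a few bytes are reserved so that hardware end padding cannot
 * write beyond the buffer.
 */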

int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        unsigned int evq_index;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;

        rc = sfc_rx_qcheck_conf(sa, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
        rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

        evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

        rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
        if (rc != 0)
                goto fail_ev_qinit;

        evq = sa->evq_info[evq_index].evq;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        evq->rxq = rxq;
        rxq->evq = evq;
        rxq->ptr_mask = rxq_info->entries - 1;
        rxq->refill_mb_pool = mb_pool;
        rxq->buf_size = buf_size;
        rxq->hw_index = sw_index;
        rxq->port_id = sa->eth_dev->data->port_id;

        /* Cache limits required on datapath in RxQ structure */
        rxq->batch_max = encp->enc_rx_batch_max;
        rxq->prefix_size = encp->enc_rx_prefix_size;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->rxq = rxq;

        return 0;

fail_desc_alloc:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        rte_free(rxq->sw_desc);
        sfc_dma_free(sa, &rxq->mem);
        rte_free(rxq);
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);
        if (rc != 0)
                goto fail_rx_init;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qstart(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qstart;
        }

        return 0;

fail_rx_qstart:
        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

        efx_rx_fini(sa->nic);

fail_rx_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);
        }

        efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_extend) {
                sfc_err(sa,
                        "Q-in-Q HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (!rxmode->hw_strip_crc) {
                sfc_warn(sa,
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;
        }

        if (rxmode->enable_scatter) {
                sfc_err(sa, "Scatter on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");
                rc = EINVAL;
        }

        return rc;
}

/**
 * Initialize the Rx subsystem.
 *
 * Called at the device configuration stage when the number of receive
 * queues is specified together with other device-level receive
 * configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;
        int rc;

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

        rc = ENOMEM;
        sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
                                         sizeof(struct sfc_rxq_info), 0,
                                         sa->socket_id);
        if (sa->rxq_info == NULL)
                goto fail_rxqs_alloc;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qinit_info;
        }

        return 0;

fail_rx_qinit_info:
        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;

fail_rxqs_alloc:
        sa->rxq_count = 0;
fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}
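
/*
 * Expected call order (a sketch based on the doc comments in this file, not
 * a normative contract): sfc_rx_init() and sfc_rx_qinit() at the device and
 * queue configuration stages, sfc_rx_start()/sfc_rx_qstart() when the device
 * is started, sfc_rx_qstop()/sfc_rx_stop() when it is stopped, and
 * sfc_rx_qfini()/sfc_rx_fini() at the close stage.
 */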

/**
 * Shut down the Rx subsystem.
 *
 * Called at the device close stage, for example, before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
        sa->rxq_count = 0;
}