net/sfc: factor out libefx-based Rx datapath
[dpdk.git] drivers/net/sfc/sfc_rx.c
/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of a failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS          (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines the Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)
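/*
 * The definitions above bound the worst-case Rx queue flush wait:
 * SFC_RX_QFLUSH_ATTEMPTS * SFC_RX_QFLUSH_POLL_ATTEMPTS *
 * SFC_RX_QFLUSH_POLL_WAIT_MS = 6 seconds (see sfc_rx_qflush() below).
 */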

void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

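/*
 * Refill the Rx ring with mbufs from the refill mempool in bulks of
 * SFC_RX_REFILL_BULK buffers, post them to the NIC via efx_rx_qpost()
 * and push the doorbell once something has actually been posted.
 */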
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        unsigned int id;
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        uint16_t port_id = rxq->dp.dpq.port_id;

        free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
                (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(objs);

        id = added & rxq->ptr_mask;
        while (bulks-- > 0) {
                if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                         RTE_DIM(objs)) < 0) {
                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                                RTE_DIM(objs);
                        break;
                }

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        m = objs[i];

                        rxd = &rxq->sw_desc[id];
                        rxd->mbuf = m;

                        rte_mbuf_refcnt_set(m, 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        m->next = NULL;
                        m->nb_segs = 1;
                        m->port = port_id;

                        addr[i] = rte_pktmbuf_mtophys(m);
                }

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        }

        /* Push the doorbell if something has been posted */
        if (rxq->added != added) {
                rxq->added = added;
                efx_rx_qpush(rxq->common, added, &rxq->pushed);
        }
}

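/*
 * Translate libefx Rx descriptor flags into mbuf ol_flags: IPv4 and
 * TCP/UDP checksum validation results are reported as GOOD/BAD, anything
 * else is left as CKSUM_UNKNOWN (which is guaranteed to be 0).
 */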
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
        uint64_t mbuf_flags = 0;

        switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
        case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
                mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
                break;
        case EFX_PKT_IPV4:
                mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
                           PKT_RX_IP_CKSUM_UNKNOWN);
                break;
        }

        switch ((desc_flags &
                 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
        case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
        case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
                mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
                break;
        case EFX_PKT_TCP:
        case EFX_PKT_UDP:
                mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
                           PKT_RX_L4_CKSUM_UNKNOWN);
                break;
        }

        return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
        return RTE_PTYPE_L2_ETHER |
                ((desc_flags & EFX_PKT_IPV4) ?
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_IPV6) ?
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
                ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(void)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
                        struct rte_mbuf *m)
{
#if EFSYS_OPT_RX_SCALE
        uint8_t *mbuf_data;

        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
                return;

        mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

        if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
                m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
                                                      EFX_RX_HASHALG_TOEPLITZ,
                                                      mbuf_data);

                m->ol_flags |= PKT_RX_RSS_HASH;
        }
#endif
}

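/*
 * The libefx-based receive burst callback: poll the event queue to refresh
 * the completed/pending indexes, walk the completed descriptors, chain
 * scattered fragments with rte_pktmbuf_chain(), drop descriptors marked
 * for discard, fill in offload flags, packet type and RSS hash, and
 * finally refill the Rx ring.
 */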
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_dp_rxq *dp_rxq = rx_queue;
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        unsigned int completed;
        unsigned int prefix_size = rxq->prefix_size;
        unsigned int done_pkts = 0;
        boolean_t discard_next = B_FALSE;
        struct rte_mbuf *scatter_pkt = NULL;

        if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
                return 0;

        sfc_ev_qpoll(rxq->evq);

        completed = rxq->completed;
        while (completed != rxq->pending && done_pkts < nb_pkts) {
                unsigned int id;
                struct sfc_efx_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int seg_len;
                unsigned int desc_flags;

                id = completed++ & rxq->ptr_mask;
                rxd = &rxq->sw_desc[id];
                m = rxd->mbuf;
                desc_flags = rxd->flags;

                if (discard_next)
                        goto discard;

                if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
                        goto discard;

                if (desc_flags & EFX_PKT_PREFIX_LEN) {
                        uint16_t tmp_size;
                        int rc __rte_unused;

                        rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
                                rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
                        SFC_ASSERT(rc == 0);
                        seg_len = tmp_size;
                } else {
                        seg_len = rxd->size - prefix_size;
                }

                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                if (scatter_pkt != NULL) {
                        if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
                                rte_mempool_put(rxq->refill_mb_pool,
                                                scatter_pkt);
                                goto discard;
                        }
                        /* The packet to deliver */
                        m = scatter_pkt;
                }

                if (desc_flags & EFX_PKT_CONT) {
                        /* The packet is scattered, more fragments to come */
                        scatter_pkt = m;
                        /* Further fragments have no prefix */
                        prefix_size = 0;
                        continue;
                }

                /* Scattered packet is done */
                scatter_pkt = NULL;
                /* The first fragment of the packet has the prefix */
                prefix_size = rxq->prefix_size;

                m->ol_flags =
                        sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
                m->packet_type =
                        sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

                /*
                 * Extract the RSS hash from the packet prefix and
                 * set the corresponding field (if needed and possible)
                 */
                sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

                m->data_off += prefix_size;

                *rx_pkts++ = m;
                done_pkts++;
                continue;

discard:
                discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
                rte_mempool_put(rxq->refill_mb_pool, m);
                rxd->mbuf = NULL;
        }

        /* pending is only moved when the entire packet is received */
        SFC_ASSERT(scatter_pkt == NULL);

        rxq->completed = completed;

        sfc_efx_rx_qrefill(rxq);

        return done_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
                return 0;

        sfc_ev_qpoll(rxq->evq);

        return rxq->pending - rxq->completed;
}

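/*
 * Map a generic datapath Rx queue back to its control path sfc_rxq using
 * the port and queue IDs stored in the datapath queue at creation time.
 */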
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
        const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
        struct rte_eth_dev *eth_dev;
        struct sfc_adapter *sa;
        struct sfc_rxq *rxq;

        SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
        eth_dev = &rte_eth_devices[dpq->port_id];

        sa = eth_dev->data->dev_private;

        SFC_ASSERT(dpq->queue_id < sa->rxq_count);
        rxq = sa->rxq_info[dpq->queue_id].rxq;

        SFC_ASSERT(rxq != NULL);
        return rxq;
}

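/*
 * Allocate the libefx-based datapath Rx queue context together with its
 * software descriptor ring and fill it in from the queue creation info.
 */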
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                   const struct rte_pci_addr *pci_addr, int socket_id,
                   const struct sfc_dp_rx_qcreate_info *info,
                   struct sfc_dp_rxq **dp_rxqp)
{
        struct sfc_efx_rxq *rxq;
        int rc;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
                                         info->rxq_entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        /* efx datapath is bound to efx control path */
        rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
        if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
                rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
        rxq->ptr_mask = info->rxq_entries - 1;
        rxq->batch_max = info->batch_max;
        rxq->prefix_size = info->prefix_size;
        rxq->refill_threshold = info->refill_threshold;
        rxq->buf_size = info->buf_size;
        rxq->refill_mb_pool = info->refill_mb_pool;

        *dp_rxqp = &rxq->dp;
        return 0;

fail_desc_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_desc);
        rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
                  __rte_unused unsigned int evq_read_ptr)
{
        /* libefx-based datapath is specific to libefx-based PMD */
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

        rxq->common = crxq->common;

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        sfc_efx_rx_qrefill(rxq);

        rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

        return 0;
}

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
                 __rte_unused unsigned int *evq_read_ptr)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

        /* libefx-based datapath is bound to libefx-based PMD and uses
         * event queue structure directly. So, there is no necessity to
         * return EvQ read pointer.
         */
}

static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
                rxd->mbuf = NULL;
                /* Packed stream relies on 0 in inactive SW desc.
                 * Rx queue stop is not performance critical, so
                 * there is no harm to do it always.
                 */
                rxd->flags = 0;
                rxd->size = 0;
        }

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

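/*
 * The libefx-based Rx datapath operations, registered under the
 * SFC_KVARG_DATAPATH_EFX name so that it can be chosen via device arguments.
 */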
struct sfc_dp_rx sfc_efx_rx = {
        .dp = {
                .name           = SFC_KVARG_DATAPATH_EFX,
                .type           = SFC_DP_RX,
                .hw_fw_caps     = 0,
        },
        .qcreate                = sfc_efx_rx_qcreate,
        .qdestroy               = sfc_efx_rx_qdestroy,
        .qstart                 = sfc_efx_rx_qstart,
        .qstop                  = sfc_efx_rx_qstop,
        .qpurge                 = sfc_efx_rx_qpurge,
        .supported_ptypes_get   = sfc_efx_supported_ptypes_get,
        .qdesc_npending         = sfc_efx_rx_qdesc_npending,
        .pkt_burst              = sfc_efx_recv_pkts,
};

unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq = sa->rxq_info[sw_index].rxq;

        if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
                return 0;

        return sa->dp_rx->qdesc_npending(rxq->dp);
}

int
sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
{
        struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

        return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}

static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;
        unsigned int retry_count;
        unsigned int wait_count;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /*
         * Retry Rx queue flushing in the case of flush failure or
         * timeout. In the worst case it can delay for 6 seconds.
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_rx_qflush(rxq->common) != 0) {
                        rxq->state |= SFC_RXQ_FLUSH_FAILED;
                        break;
                }
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for the Rx queue flush done or failed event at least
                 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_info(sa, "RxQ %u flushed", sw_index);
        }

        sa->dp_rx->qpurge(rxq->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
        boolean_t rss = (sa->rss_channels > 1) ? B_TRUE : B_FALSE;
        struct sfc_port *port = &sa->port;
        int rc;

        /*
         * If promiscuous or all-multicast mode has been requested, setting
         * the filter for the default Rx queue might fail, in particular while
         * running over a PCI function which is not a member of the
         * corresponding privilege groups; if this occurs, a few iterations
         * will be made to repeat this step without the promiscuous and
         * all-multicast flags set
         */
retry:
        rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
        if (rc == 0)
                return 0;
        else if (rc != EOPNOTSUPP)
                return rc;

        if (port->promisc) {
                sfc_warn(sa, "promiscuous mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "promiscuous mode will be disabled");

                port->promisc = B_FALSE;
                rc = sfc_set_rx_mode(sa);
                if (rc != 0)
                        return rc;

                goto retry;
        }

        if (port->allmulti) {
                sfc_warn(sa, "all-multicast mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "all-multicast mode will be disabled");

                port->allmulti = B_FALSE;
                rc = sfc_set_rx_mode(sa);
                if (rc != 0)
                        return rc;

                goto retry;
        }

        return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        evq = rxq->evq;

        rc = sfc_ev_qstart(sa, evq->evq_index);
        if (rc != 0)
                goto fail_ev_qstart;

        rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                            &rxq->mem, rxq_info->entries,
                            0 /* not used on EF10 */, evq->common,
                            &rxq->common);
        if (rc != 0)
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
        if (rc != 0)
                goto fail_dp_qstart;

        rxq->state |= SFC_RXQ_STARTED;

        if (sw_index == 0) {
                rc = sfc_rx_default_rxq_set_filter(sa, rxq);
                if (rc != 0)
                        goto fail_mac_filter_default_rxq_set;
        }

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_mac_filter_default_rxq_set:
        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
        sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
        sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
        return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;

        if (rxq->state == SFC_RXQ_INITIALIZED)
                return;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

        if (sw_index == 0)
                efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(sa, rxq->evq->evq_index);
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
                   const struct rte_eth_rxconf *rx_conf)
{
        const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
                sfc_err(sa,
                        "RxQ free threshold too large: %u vs maximum %u",
                        rx_conf->rx_free_thresh, rx_free_thresh_max);
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        return rc;
}

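/*
 * Compute the data start alignment guaranteed for mbufs from the given
 * mempool, derived from the cache-line alignment of the mbuf object and
 * the fixed data offset inside it.
 */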
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << (order - 1);
}

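/*
 * Derive the usable Rx buffer size from the mempool object data room:
 * subtract the mbuf headroom, reserve space for the buffer start alignment
 * required by the NIC and trim the length so that end padding cannot write
 * beyond the buffer. Returns 0 if the data room is too small.
 */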
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are power of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * mbuf is always cache line aligned, double-check
         * that it meets rx buffer start alignment requirements.
         */

        /* Start from mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate the space which can be lost. If the guaranteed
                 * buffer size is odd, the lost space is (nic_align_end - 1).
                 * A more accurate formula is below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * Start is aligned the same or better than end,
                 * just align the length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}

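/*
 * Per-queue Rx setup: validate the configuration, compute the Rx buffer
 * size for the mempool, initialize the event queue, allocate the control
 * path queue and its DMA ring, and finally create the datapath Rx queue
 * via the selected datapath's qcreate() callback.
 */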
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        unsigned int evq_index;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;
        struct sfc_dp_rx_qcreate_info info;

        rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
        rxq_info->type =
                sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
                EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;

        evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

        rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
        if (rc != 0)
                goto fail_ev_qinit;

        evq = sa->evq_info[evq_index].evq;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rxq_info->rxq = rxq;

        rxq->evq = evq;
        rxq->hw_index = sw_index;
        rxq->refill_threshold = rx_conf->rx_free_thresh;
        rxq->refill_mb_pool = mb_pool;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        memset(&info, 0, sizeof(info));
        info.refill_mb_pool = rxq->refill_mb_pool;
        info.refill_threshold = rxq->refill_threshold;
        info.buf_size = buf_size;
        info.batch_max = encp->enc_rx_batch_max;
        info.prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
        if (sa->hash_support == EFX_RX_HASH_AVAILABLE)
                info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif

        info.rxq_entries = rxq_info->entries;

        rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
                                &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
                                socket_id, &info, &rxq->dp);
        if (rc != 0)
                goto fail_dp_rx_qcreate;

        evq->dp_rxq = rxq->dp;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

        return 0;

fail_dp_rx_qcreate:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rxq_info->rxq = NULL;
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        sa->dp_rx->qdestroy(rxq->dp);
        rxq->dp = NULL;

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        sfc_dma_free(sa, &rxq->mem);
        rte_free(rxq);
}

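/*
 * Conversion helpers between the DPDK rte_eth RSS hash flags (ETH_RSS_*)
 * and the libefx hash type bits (EFX_RX_HASH_*), used when programming
 * and reporting the RSS configuration.
 */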
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_type_t
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
{
        efx_rx_hash_type_t efx_hash_types = 0;

        if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV4;

        if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV4;

        if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV6;

        if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV6;

        return efx_hash_types;
}

uint64_t
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
{
        uint64_t rss_hf = 0;

        if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
                rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                           ETH_RSS_NONFRAG_IPV4_OTHER);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

        if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
                rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                           ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
                rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

        return rss_hf;
}
#endif

static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
        int rc = 0;

#if EFSYS_OPT_RX_SCALE
        if (sa->rss_channels > 1) {
                rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
                                           sa->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
                                          sizeof(sa->rss_key));
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
                                          sizeof(sa->rss_tbl));
        }

finish:
#endif
        return rc;
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);
        if (rc != 0)
                goto fail_rx_init;

        rc = sfc_rx_rss_config(sa);
        if (rc != 0)
                goto fail_rss_config;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                if ((!sa->rxq_info[sw_index].deferred_start ||
                     sa->rxq_info[sw_index].deferred_started)) {
                        rc = sfc_rx_qstart(sa, sw_index);
                        if (rc != 0)
                                goto fail_rx_qstart;
                }
        }

        return 0;

fail_rx_qstart:
        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

fail_rss_config:
        efx_rx_fini(sa->nic);

fail_rx_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);
        }

        efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}

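/*
 * Validate the device-level Rx mode: only "none" and RSS multi-queue modes
 * are accepted, header split, VLAN offloads and LRO are rejected, and FCS
 * stripping is forced on since the hardware always strips it.
 */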
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
#if EFSYS_OPT_RX_SCALE
        case ETH_MQ_RX_RSS:
                if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
                        sfc_err(sa, "RSS is not available");
                        rc = EINVAL;
                }
                break;
#endif
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_extend) {
                sfc_err(sa,
                        "Q-in-Q HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (!rxmode->hw_strip_crc) {
                sfc_warn(sa,
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;
        }

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");
                rc = EINVAL;
        }

        return rc;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at the device configuration stage when the number of receive queues
 * is specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;
        int rc;

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

        rc = ENOMEM;
        sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
                                         sizeof(struct sfc_rxq_info), 0,
                                         sa->socket_id);
        if (sa->rxq_info == NULL)
                goto fail_rxqs_alloc;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qinit_info;
        }

#if EFSYS_OPT_RX_SCALE
        sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
                           MIN(sa->rxq_count, EFX_MAXRSS) : 1;

        if (sa->rss_channels > 1) {
                for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
                        sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
        }
#endif

        return 0;

fail_rx_qinit_info:
        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;

fail_rxqs_alloc:
        sa->rxq_count = 0;
fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at the device close stage, for example, before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
        sa->rxq_count = 0;
}