net/sfc: handle received packet type info from HW
[dpdk.git] / drivers/net/sfc/sfc_rx.c
/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS          (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. Together with SFC_RX_QFLUSH_POLL_WAIT_MS it
 * defines the Rx queue flush attempt timeout.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)

void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

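/*
 * Refill the Rx ring: allocate mbufs from the queue mempool in bulks of
 * SFC_RX_REFILL_BULK, initialise them, post their physical addresses to
 * the common code Rx queue and push the doorbell once at the end if
 * anything has been posted.
 */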
static void
sfc_rx_qrefill(struct sfc_rxq *rxq)
{
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        unsigned int id;
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        uint8_t port_id = rxq->port_id;

        free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
                (added - rxq->completed);
        bulks = free_space / RTE_DIM(objs);

        id = added & rxq->ptr_mask;
        while (bulks-- > 0) {
                if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                         RTE_DIM(objs)) < 0) {
                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                                RTE_DIM(objs);
                        break;
                }

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        m = objs[i];

                        rxd = &rxq->sw_desc[id];
                        rxd->mbuf = m;

                        rte_mbuf_refcnt_set(m, 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        m->next = NULL;
                        m->nb_segs = 1;
                        m->port = port_id;

                        addr[i] = rte_pktmbuf_mtophys(m);
                }

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        }

        /* Push doorbell if something is posted */
        if (rxq->added != added) {
                rxq->added = added;
                efx_rx_qpush(rxq->common, added, &rxq->pushed);
        }
}

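/*
 * Convert Rx descriptor checksum validation flags reported by the HW
 * into mbuf offload flags (IP and L4 checksum good/bad/unknown).
 */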
static uint64_t
sfc_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
        uint64_t mbuf_flags = 0;

        switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
        case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
                mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
                break;
        case EFX_PKT_IPV4:
                mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
                           PKT_RX_IP_CKSUM_UNKNOWN);
                break;
        }

        switch ((desc_flags &
                 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
        case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
        case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
                mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
                break;
        case EFX_PKT_TCP:
        case EFX_PKT_UDP:
                mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
                           PKT_RX_L4_CKSUM_UNKNOWN);
                break;
        }

        return mbuf_flags;
}

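/*
 * Convert Rx descriptor classification flags reported by the HW into the
 * DPDK packet type: L2 is always Ethernet, L3 and L4 types are filled in
 * when the corresponding EFX_PKT_* flags are set.
 */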
static uint32_t
sfc_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
        return RTE_PTYPE_L2_ETHER |
                ((desc_flags & EFX_PKT_IPV4) ?
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_IPV6) ?
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
                ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

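/*
 * Burst receive callback: poll the event queue to update completed
 * descriptors, hand over up to nb_pkts completed packets to the caller
 * (dropping packets marked for discard) and refill the Rx ring.
 */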
uint16_t
sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_rxq *rxq = rx_queue;
        unsigned int completed;
        unsigned int prefix_size = rxq->prefix_size;
        unsigned int done_pkts = 0;
        boolean_t discard_next = B_FALSE;

        if (unlikely((rxq->state & SFC_RXQ_RUNNING) == 0))
                return 0;

        sfc_ev_qpoll(rxq->evq);

        completed = rxq->completed;
        while (completed != rxq->pending && done_pkts < nb_pkts) {
                unsigned int id;
                struct sfc_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int seg_len;
                unsigned int desc_flags;

                id = completed++ & rxq->ptr_mask;
                rxd = &rxq->sw_desc[id];
                m = rxd->mbuf;
                desc_flags = rxd->flags;

                if (discard_next)
                        goto discard;

                if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
                        goto discard;

                if (desc_flags & EFX_PKT_CONT)
                        goto discard;

                if (desc_flags & EFX_PKT_PREFIX_LEN) {
                        uint16_t tmp_size;
                        int rc __rte_unused;

                        rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
                                rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
                        SFC_ASSERT(rc == 0);
                        seg_len = tmp_size;
                } else {
                        seg_len = rxd->size - prefix_size;
                }

                m->data_off += prefix_size;
                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                m->ol_flags = sfc_rx_desc_flags_to_offload_flags(desc_flags);
                m->packet_type = sfc_rx_desc_flags_to_packet_type(desc_flags);

                *rx_pkts++ = m;
                done_pkts++;
                continue;

discard:
                discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
                rte_mempool_put(rxq->refill_mb_pool, m);
                rxd->mbuf = NULL;
        }

        rxq->completed = completed;

        sfc_rx_qrefill(rxq);

        return done_pkts;
}

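/* Free mbufs of all posted but not yet completed Rx descriptors */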
static void
sfc_rx_qpurge(struct sfc_rxq *rxq)
{
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
                rxd->mbuf = NULL;
        }
}

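/*
 * Flush the Rx queue: request the flush, poll the event queue for the
 * flush done or failed event and purge any buffers still owned by the HW.
 */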
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;
        unsigned int retry_count;
        unsigned int wait_count;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /*
         * Retry Rx queue flushing in the case of flush failure or
         * timeout. In the worst case it can delay for 6 seconds
         * (SFC_RX_QFLUSH_ATTEMPTS * SFC_RX_QFLUSH_POLL_ATTEMPTS *
         * SFC_RX_QFLUSH_POLL_WAIT_MS = 3 * 2000 * 1 ms).
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_rx_qflush(rxq->common) != 0) {
                        rxq->state |= SFC_RXQ_FLUSH_FAILED;
                        break;
                }
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for the Rx queue flush done or failed event at least
                 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_info(sa, "RxQ %u flushed", sw_index);
        }

        sfc_rx_qpurge(rxq);
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        evq = rxq->evq;

        rc = sfc_ev_qstart(sa, evq->evq_index);
        if (rc != 0)
                goto fail_ev_qstart;

        rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                            &rxq->mem, rxq_info->entries,
                            0 /* not used on EF10 */, evq->common,
                            &rxq->common);
        if (rc != 0)
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        rxq->state |= (SFC_RXQ_STARTED | SFC_RXQ_RUNNING);

        sfc_rx_qrefill(rxq);

        if (sw_index == 0) {
                rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common,
                                                    B_FALSE);
                if (rc != 0)
                        goto fail_mac_filter_default_rxq_set;
        }

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_mac_filter_default_rxq_set:
        sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
        sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
        return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        rxq->state &= ~SFC_RXQ_RUNNING;

        if (sw_index == 0)
                efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(sa, rxq->evq->evq_index);
}

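/* Validate per-queue Rx configuration against what the PMD supports */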
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa,
                   const struct rte_eth_rxconf *rx_conf)
{
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_free_thresh != 0) {
                sfc_err(sa, "RxQ free threshold is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_deferred_start != 0) {
                sfc_err(sa, "RxQ deferred start is not supported");
                rc = EINVAL;
        }

        return rc;
}

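/*
 * Return a power of 2 which the start of mbuf data is guaranteed to be
 * aligned to, derived from the cache line alignment of the mbuf object
 * and the fixed data offset (mbuf header, private area and headroom).
 */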
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << (order - 1);
}

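/*
 * Calculate the Rx buffer size which may be posted to the HW for mbufs
 * from the given mempool: take the data room size, subtract the headroom
 * and reserve space for the buffer start and end padding alignments
 * required by the NIC. Returns 0 if the mempool objects are too small.
 */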
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are powers of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * mbuf is always cache line aligned, double-check
         * that it meets Rx buffer start alignment requirements.
         */

        /* Start from the mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate the space which can be lost. If the guaranteed
                 * buffer size is odd, the lost space is (nic_align_end - 1).
                 * A more accurate formula is used below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * Start is aligned the same as or better than end,
                 * just align the length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}

int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        unsigned int evq_index;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;

        rc = sfc_rx_qcheck_conf(sa, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
        rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

        evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

        rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
        if (rc != 0)
                goto fail_ev_qinit;

        evq = sa->evq_info[evq_index].evq;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        evq->rxq = rxq;
        rxq->evq = evq;
        rxq->ptr_mask = rxq_info->entries - 1;
        rxq->refill_mb_pool = mb_pool;
        rxq->buf_size = buf_size;
        rxq->hw_index = sw_index;
        rxq->port_id = sa->eth_dev->data->port_id;

        /* Cache limits required on datapath in RxQ structure */
        rxq->batch_max = encp->enc_rx_batch_max;
        rxq->prefix_size = encp->enc_rx_prefix_size;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->rxq = rxq;

        return 0;

fail_desc_alloc:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        rte_free(rxq->sw_desc);
        sfc_dma_free(sa, &rxq->mem);
        rte_free(rxq);
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);
        if (rc != 0)
                goto fail_rx_init;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qstart(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qstart;
        }

        return 0;

fail_rx_qstart:
        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

        efx_rx_fini(sa->nic);

fail_rx_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);
        }

        efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}

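/* Validate device-level Rx mode settings against what the PMD supports */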
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_extend) {
                sfc_err(sa,
                        "Q-in-Q HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (!rxmode->hw_strip_crc) {
                sfc_warn(sa,
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;
        }

        if (rxmode->enable_scatter) {
                sfc_err(sa, "Scatter on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");
                rc = EINVAL;
        }

        return rc;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device configuration stage when the number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;
        int rc;

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

        rc = ENOMEM;
        sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
                                         sizeof(struct sfc_rxq_info), 0,
                                         sa->socket_id);
        if (sa->rxq_info == NULL)
                goto fail_rxqs_alloc;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qinit_info;
        }

        return 0;

fail_rx_qinit_info:
        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;

fail_rxqs_alloc:
        sa->rxq_count = 0;
fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
        sa->rxq_count = 0;
}