net/sfc: use Rx queue max fill level calculated on init
[dpdk.git] / drivers / net / sfc / sfc_rx.c
1 /*-
2  *   BSD LICENSE
3  *
4  * Copyright (c) 2016-2017 Solarflare Communications Inc.
5  * All rights reserved.
6  *
7  * This software was jointly developed between OKTET Labs (under contract
8  * for Solarflare) and Solarflare Communications, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright notice,
14  *    this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright notice,
16  *    this list of conditions and the following disclaimer in the documentation
17  *    and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31
32 #include <rte_mempool.h>
33
34 #include "efx.h"
35
36 #include "sfc.h"
37 #include "sfc_debug.h"
38 #include "sfc_log.h"
39 #include "sfc_ev.h"
40 #include "sfc_rx.h"
41 #include "sfc_kvargs.h"
42 #include "sfc_tweak.h"
43
44 /*
45  * Maximum number of Rx queue flush attempts in the case of failure or
46  * flush timeout
47  */
48 #define SFC_RX_QFLUSH_ATTEMPTS          (3)
49
50 /*
51  * Time to wait between event queue polling attempts when waiting for Rx
52  * queue flush done or failed events.
53  */
54 #define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)
55
56 /*
57  * Maximum number of event queue polling attempts when waiting for Rx queue
58  * flush done or failed events. It defines Rx queue flush attempt timeout
59  * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
60  */
61 #define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)
62
63 void
64 sfc_rx_qflush_done(struct sfc_rxq *rxq)
65 {
66         rxq->state |= SFC_RXQ_FLUSHED;
67         rxq->state &= ~SFC_RXQ_FLUSHING;
68 }
69
70 void
71 sfc_rx_qflush_failed(struct sfc_rxq *rxq)
72 {
73         rxq->state |= SFC_RXQ_FLUSH_FAILED;
74         rxq->state &= ~SFC_RXQ_FLUSHING;
75 }
76
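/*
 * Refill the Rx ring from the mempool in bulks of SFC_RX_REFILL_BULK mbufs,
 * post the buffers to the HW Rx queue and push the doorbell. Does nothing
 * if the free space is below the refill threshold.
 */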
77 static void
78 sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
79 {
80         unsigned int free_space;
81         unsigned int bulks;
82         void *objs[SFC_RX_REFILL_BULK];
83         efsys_dma_addr_t addr[RTE_DIM(objs)];
84         unsigned int added = rxq->added;
85         unsigned int id;
86         unsigned int i;
87         struct sfc_efx_rx_sw_desc *rxd;
88         struct rte_mbuf *m;
89         uint16_t port_id = rxq->dp.dpq.port_id;
90
91         free_space = rxq->max_fill_level - (added - rxq->completed);
92
93         if (free_space < rxq->refill_threshold)
94                 return;
95
96         bulks = free_space / RTE_DIM(objs);
97         /* refill_threshold guarantees that bulks is positive */
98         SFC_ASSERT(bulks > 0);
99
100         id = added & rxq->ptr_mask;
101         do {
102                 if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
103                                                   RTE_DIM(objs)) < 0)) {
104                         /*
105                          * It is hardly safe to increment the counter from
106                          * different contexts, but all PMDs do it.
107                          */
108                         rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
109                                 RTE_DIM(objs);
110                         /* Return if we have posted nothing yet */
111                         if (added == rxq->added)
112                                 return;
113                         /* Push posted */
114                         break;
115                 }
116
117                 for (i = 0; i < RTE_DIM(objs);
118                      ++i, id = (id + 1) & rxq->ptr_mask) {
119                         m = objs[i];
120
121                         rxd = &rxq->sw_desc[id];
122                         rxd->mbuf = m;
123
124                         SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
125                         m->data_off = RTE_PKTMBUF_HEADROOM;
126                         SFC_ASSERT(m->next == NULL);
127                         SFC_ASSERT(m->nb_segs == 1);
128                         m->port = port_id;
129
130                         addr[i] = rte_pktmbuf_iova(m);
131                 }
132
133                 efx_rx_qpost(rxq->common, addr, rxq->buf_size,
134                              RTE_DIM(objs), rxq->completed, added);
135                 added += RTE_DIM(objs);
136         } while (--bulks > 0);
137
138         SFC_ASSERT(added != rxq->added);
139         rxq->added = added;
140         efx_rx_qpush(rxq->common, added, &rxq->pushed);
141 }
142
143 static uint64_t
144 sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
145 {
146         uint64_t mbuf_flags = 0;
147
148         switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
149         case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
150                 mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
151                 break;
152         case EFX_PKT_IPV4:
153                 mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
154                 break;
155         default:
156                 RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
157                 SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
158                            PKT_RX_IP_CKSUM_UNKNOWN);
159                 break;
160         }
161
162         switch ((desc_flags &
163                  (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
164         case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
165         case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
166                 mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
167                 break;
168         case EFX_PKT_TCP:
169         case EFX_PKT_UDP:
170                 mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
171                 break;
172         default:
173                 RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
174                 SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
175                            PKT_RX_L4_CKSUM_UNKNOWN);
176                 break;
177         }
178
179         return mbuf_flags;
180 }
181
182 static uint32_t
183 sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
184 {
185         return RTE_PTYPE_L2_ETHER |
186                 ((desc_flags & EFX_PKT_IPV4) ?
187                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
188                 ((desc_flags & EFX_PKT_IPV6) ?
189                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
190                 ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
191                 ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
192 }
193
194 static const uint32_t *
195 sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
196 {
197         static const uint32_t ptypes[] = {
198                 RTE_PTYPE_L2_ETHER,
199                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
200                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
201                 RTE_PTYPE_L4_TCP,
202                 RTE_PTYPE_L4_UDP,
203                 RTE_PTYPE_UNKNOWN
204         };
205
206         return ptypes;
207 }
208
209 #if EFSYS_OPT_RX_SCALE
210 static void
211 sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
212                         struct rte_mbuf *m)
213 {
214         uint8_t *mbuf_data;
215
216
217         if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
218                 return;
219
220         mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
221
222         if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
223                 m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
224                                                       EFX_RX_HASHALG_TOEPLITZ,
225                                                       mbuf_data);
226
227                 m->ol_flags |= PKT_RX_RSS_HASH;
228         }
229 }
230 #else
231 static void
232 sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
233                         __rte_unused unsigned int flags,
234                         __rte_unused struct rte_mbuf *m)
235 {
236 }
237 #endif
238
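/*
 * Receive burst callback of the libefx-based datapath: poll the event queue,
 * complete pending descriptors (chaining fragments of scattered packets),
 * fill in offload flags, packet type and RSS hash, and refill the Rx ring.
 */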
239 static uint16_t
240 sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
241 {
242         struct sfc_dp_rxq *dp_rxq = rx_queue;
243         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
244         unsigned int completed;
245         unsigned int prefix_size = rxq->prefix_size;
246         unsigned int done_pkts = 0;
247         boolean_t discard_next = B_FALSE;
248         struct rte_mbuf *scatter_pkt = NULL;
249
250         if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
251                 return 0;
252
253         sfc_ev_qpoll(rxq->evq);
254
255         completed = rxq->completed;
256         while (completed != rxq->pending && done_pkts < nb_pkts) {
257                 unsigned int id;
258                 struct sfc_efx_rx_sw_desc *rxd;
259                 struct rte_mbuf *m;
260                 unsigned int seg_len;
261                 unsigned int desc_flags;
262
263                 id = completed++ & rxq->ptr_mask;
264                 rxd = &rxq->sw_desc[id];
265                 m = rxd->mbuf;
266                 desc_flags = rxd->flags;
267
268                 if (discard_next)
269                         goto discard;
270
271                 if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
272                         goto discard;
273
274                 if (desc_flags & EFX_PKT_PREFIX_LEN) {
275                         uint16_t tmp_size;
276                         int rc __rte_unused;
277
278                         rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
279                                 rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
280                         SFC_ASSERT(rc == 0);
281                         seg_len = tmp_size;
282                 } else {
283                         seg_len = rxd->size - prefix_size;
284                 }
285
286                 rte_pktmbuf_data_len(m) = seg_len;
287                 rte_pktmbuf_pkt_len(m) = seg_len;
288
289                 if (scatter_pkt != NULL) {
290                         if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
291                                 rte_pktmbuf_free(scatter_pkt);
292                                 goto discard;
293                         }
294                         /* The packet to deliver */
295                         m = scatter_pkt;
296                 }
297
298                 if (desc_flags & EFX_PKT_CONT) {
299                         /* The packet is scattered, more fragments to come */
300                         scatter_pkt = m;
301                         /* Further fragments have no prefix */
302                         prefix_size = 0;
303                         continue;
304                 }
305
306                 /* Scattered packet is done */
307                 scatter_pkt = NULL;
308                 /* The first fragment of the packet has prefix */
309                 prefix_size = rxq->prefix_size;
310
311                 m->ol_flags =
312                         sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
313                 m->packet_type =
314                         sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
315
316                 /*
317                  * Extract RSS hash from the packet prefix and
318                  * set the corresponding field (if needed and possible)
319                  */
320                 sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
321
322                 m->data_off += prefix_size;
323
324                 *rx_pkts++ = m;
325                 done_pkts++;
326                 continue;
327
328 discard:
329                 discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
330                 rte_mempool_put(rxq->refill_mb_pool, m);
331                 rxd->mbuf = NULL;
332         }
333
334         /* pending is only moved when the entire packet is received */
335         SFC_ASSERT(scatter_pkt == NULL);
336
337         rxq->completed = completed;
338
339         sfc_efx_rx_qrefill(rxq);
340
341         return done_pkts;
342 }
343
344 static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
345 static unsigned int
346 sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
347 {
348         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
349
350         if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
351                 return 0;
352
353         sfc_ev_qpoll(rxq->evq);
354
355         return rxq->pending - rxq->completed;
356 }
357
358 static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
359 static int
360 sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
361 {
362         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
363
364         if (unlikely(offset > rxq->ptr_mask))
365                 return -EINVAL;
366
367         /*
368          * Poll the EvQ to derive an up-to-date 'rxq->pending' figure;
369          * the queue is required to be running for this, but the check
370          * is omitted because the API design assumes that it is the
371          * duty of the caller to satisfy all conditions
372          */
373         SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
374                    SFC_EFX_RXQ_FLAG_RUNNING);
375         sfc_ev_qpoll(rxq->evq);
376
377         /*
378          * There is a handful of reserved entries in the ring,
379          * but an explicit check whether the offset points to a
380          * reserved entry is omitted: the two checks below rely on
381          * figures which take the HW limits into account, so if an
382          * entry is reserved, both checks fail and the UNAVAIL code
383          * is returned
384          */
385
386         if (offset < (rxq->pending - rxq->completed))
387                 return RTE_ETH_RX_DESC_DONE;
388
389         if (offset < (rxq->added - rxq->completed))
390                 return RTE_ETH_RX_DESC_AVAIL;
391
392         return RTE_ETH_RX_DESC_UNAVAIL;
393 }
394
395 struct sfc_rxq *
396 sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
397 {
398         const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
399         struct rte_eth_dev *eth_dev;
400         struct sfc_adapter *sa;
401         struct sfc_rxq *rxq;
402
403         SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
404         eth_dev = &rte_eth_devices[dpq->port_id];
405
406         sa = eth_dev->data->dev_private;
407
408         SFC_ASSERT(dpq->queue_id < sa->rxq_count);
409         rxq = sa->rxq_info[dpq->queue_id].rxq;
410
411         SFC_ASSERT(rxq != NULL);
412         return rxq;
413 }
414
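/*
 * For the libefx-based datapath both Rx queue and event queue sizes match
 * the requested number of descriptors; the maximum fill level is limited
 * by EFX_RXQ_LIMIT() which takes the reserved ring entries into account.
 */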
415 static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
416 static int
417 sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
418                           unsigned int *rxq_entries,
419                           unsigned int *evq_entries,
420                           unsigned int *rxq_max_fill_level)
421 {
422         *rxq_entries = nb_rx_desc;
423         *evq_entries = nb_rx_desc;
424         *rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
425         return 0;
426 }
427
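/*
 * Allocate the libefx datapath Rx queue together with its software
 * descriptor ring on the requested socket and copy the queue parameters
 * from the creation info.
 */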
428 static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
429 static int
430 sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
431                    const struct rte_pci_addr *pci_addr, int socket_id,
432                    const struct sfc_dp_rx_qcreate_info *info,
433                    struct sfc_dp_rxq **dp_rxqp)
434 {
435         struct sfc_efx_rxq *rxq;
436         int rc;
437
438         rc = ENOMEM;
439         rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
440                                  RTE_CACHE_LINE_SIZE, socket_id);
441         if (rxq == NULL)
442                 goto fail_rxq_alloc;
443
444         sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
445
446         rc = ENOMEM;
447         rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
448                                          info->rxq_entries,
449                                          sizeof(*rxq->sw_desc),
450                                          RTE_CACHE_LINE_SIZE, socket_id);
451         if (rxq->sw_desc == NULL)
452                 goto fail_desc_alloc;
453
454         /* efx datapath is bound to efx control path */
455         rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
456         if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
457                 rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
458         rxq->ptr_mask = info->rxq_entries - 1;
459         rxq->batch_max = info->batch_max;
460         rxq->prefix_size = info->prefix_size;
461         rxq->max_fill_level = info->max_fill_level;
462         rxq->refill_threshold = info->refill_threshold;
463         rxq->buf_size = info->buf_size;
464         rxq->refill_mb_pool = info->refill_mb_pool;
465
466         *dp_rxqp = &rxq->dp;
467         return 0;
468
469 fail_desc_alloc:
470         rte_free(rxq);
471
472 fail_rxq_alloc:
473         return rc;
474 }
475
476 static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
477 static void
478 sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
479 {
480         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
481
482         rte_free(rxq->sw_desc);
483         rte_free(rxq);
484 }
485
486 static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
487 static int
488 sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
489                   __rte_unused unsigned int evq_read_ptr)
490 {
491         /* libefx-based datapath is specific to libefx-based PMD */
492         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
493         struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
494
495         rxq->common = crxq->common;
496
497         rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
498
499         sfc_efx_rx_qrefill(rxq);
500
501         rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
502
503         return 0;
504 }
505
506 static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
507 static void
508 sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
509                  __rte_unused unsigned int *evq_read_ptr)
510 {
511         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
512
513         rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
514
515         /* libefx-based datapath is bound to libefx-based PMD and uses
516          * event queue structure directly. So, there is no need to
517          * return the EvQ read pointer.
518          */
519 }
520
521 static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
522 static void
523 sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
524 {
525         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
526         unsigned int i;
527         struct sfc_efx_rx_sw_desc *rxd;
528
529         for (i = rxq->completed; i != rxq->added; ++i) {
530                 rxd = &rxq->sw_desc[i & rxq->ptr_mask];
531                 rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
532                 rxd->mbuf = NULL;
533                 /* Packed stream relies on 0 in inactive SW desc.
534                  * Rx queue stop is not performance critical, so
535                  * there is no harm in doing it always.
536                  */
537                 rxd->flags = 0;
538                 rxd->size = 0;
539         }
540
541         rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
542 }
543
544 struct sfc_dp_rx sfc_efx_rx = {
545         .dp = {
546                 .name           = SFC_KVARG_DATAPATH_EFX,
547                 .type           = SFC_DP_RX,
548                 .hw_fw_caps     = 0,
549         },
550         .features               = SFC_DP_RX_FEAT_SCATTER,
551         .qsize_up_rings         = sfc_efx_rx_qsize_up_rings,
552         .qcreate                = sfc_efx_rx_qcreate,
553         .qdestroy               = sfc_efx_rx_qdestroy,
554         .qstart                 = sfc_efx_rx_qstart,
555         .qstop                  = sfc_efx_rx_qstop,
556         .qpurge                 = sfc_efx_rx_qpurge,
557         .supported_ptypes_get   = sfc_efx_supported_ptypes_get,
558         .qdesc_npending         = sfc_efx_rx_qdesc_npending,
559         .qdesc_status           = sfc_efx_rx_qdesc_status,
560         .pkt_burst              = sfc_efx_recv_pkts,
561 };
562
563 unsigned int
564 sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
565 {
566         struct sfc_rxq *rxq;
567
568         SFC_ASSERT(sw_index < sa->rxq_count);
569         rxq = sa->rxq_info[sw_index].rxq;
570
571         if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
572                 return 0;
573
574         return sa->dp_rx->qdesc_npending(rxq->dp);
575 }
576
577 int
578 sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
579 {
580         struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
581
582         return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
583 }
584
585 static void
586 sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
587 {
588         struct sfc_rxq *rxq;
589         unsigned int retry_count;
590         unsigned int wait_count;
591         int rc;
592
593         rxq = sa->rxq_info[sw_index].rxq;
594         SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
595
596         /*
597          * Retry Rx queue flushing in the case of flush failure or
598          * timeout. In the worst case it can take up to 6 seconds.
599          */
600         for (retry_count = 0;
601              ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
602              (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
603              ++retry_count) {
604                 rc = efx_rx_qflush(rxq->common);
605                 if (rc != 0) {
606                         rxq->state |= (rc == EALREADY) ?
607                                 SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
608                         break;
609                 }
610                 rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
611                 rxq->state |= SFC_RXQ_FLUSHING;
612
613                 /*
614                  * Wait for Rx queue flush done or failed event at least
615                  * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
616                  * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
617                  * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
618                  */
619                 wait_count = 0;
620                 do {
621                         rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
622                         sfc_ev_qpoll(rxq->evq);
623                 } while ((rxq->state & SFC_RXQ_FLUSHING) &&
624                          (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
625
626                 if (rxq->state & SFC_RXQ_FLUSHING)
627                         sfc_err(sa, "RxQ %u flush timed out", sw_index);
628
629                 if (rxq->state & SFC_RXQ_FLUSH_FAILED)
630                         sfc_err(sa, "RxQ %u flush failed", sw_index);
631
632                 if (rxq->state & SFC_RXQ_FLUSHED)
633                         sfc_info(sa, "RxQ %u flushed", sw_index);
634         }
635
636         sa->dp_rx->qpurge(rxq->dp);
637 }
638
639 static int
640 sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
641 {
642         boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
643         struct sfc_port *port = &sa->port;
644         int rc;
645
646         /*
647          * If promiscuous or all-multicast mode has been requested, setting
648          * filter for the default Rx queue might fail, in particular, while
649          * running over a PCI function which is not a member of the corresponding
650          * privilege groups; if this occurs, a few iterations will be made to
651          * repeat this step without the promiscuous and all-multicast flags set
652          */
653 retry:
654         rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
655         if (rc == 0)
656                 return 0;
657         else if (rc != EOPNOTSUPP)
658                 return rc;
659
660         if (port->promisc) {
661                 sfc_warn(sa, "promiscuous mode has been requested, "
662                              "but the HW rejects it");
663                 sfc_warn(sa, "promiscuous mode will be disabled");
664
665                 port->promisc = B_FALSE;
666                 rc = sfc_set_rx_mode(sa);
667                 if (rc != 0)
668                         return rc;
669
670                 goto retry;
671         }
672
673         if (port->allmulti) {
674                 sfc_warn(sa, "all-multicast mode has been requested, "
675                              "but the HW rejects it");
676                 sfc_warn(sa, "all-multicast mode will be disabled");
677
678                 port->allmulti = B_FALSE;
679                 rc = sfc_set_rx_mode(sa);
680                 if (rc != 0)
681                         return rc;
682
683                 goto retry;
684         }
685
686         return rc;
687 }
688
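/*
 * Start the Rx queue: start its event queue, create and enable the HW Rx
 * queue, start the datapath queue and, for the first queue of a
 * non-isolated port, set the default Rx queue filter.
 */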
689 int
690 sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
691 {
692         struct sfc_port *port = &sa->port;
693         struct sfc_rxq_info *rxq_info;
694         struct sfc_rxq *rxq;
695         struct sfc_evq *evq;
696         int rc;
697
698         sfc_log_init(sa, "sw_index=%u", sw_index);
699
700         SFC_ASSERT(sw_index < sa->rxq_count);
701
702         rxq_info = &sa->rxq_info[sw_index];
703         rxq = rxq_info->rxq;
704         SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
705
706         evq = rxq->evq;
707
708         rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
709         if (rc != 0)
710                 goto fail_ev_qstart;
711
712         rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
713                             &rxq->mem, rxq_info->entries,
714                             0 /* not used on EF10 */, rxq_info->type_flags,
715                             evq->common, &rxq->common);
716         if (rc != 0)
717                 goto fail_rx_qcreate;
718
719         efx_rx_qenable(rxq->common);
720
721         rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
722         if (rc != 0)
723                 goto fail_dp_qstart;
724
725         rxq->state |= SFC_RXQ_STARTED;
726
727         if ((sw_index == 0) && !port->isolated) {
728                 rc = sfc_rx_default_rxq_set_filter(sa, rxq);
729                 if (rc != 0)
730                         goto fail_mac_filter_default_rxq_set;
731         }
732
733         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
734         sa->eth_dev->data->rx_queue_state[sw_index] =
735                 RTE_ETH_QUEUE_STATE_STARTED;
736
737         return 0;
738
739 fail_mac_filter_default_rxq_set:
740         sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
741
742 fail_dp_qstart:
743         sfc_rx_qflush(sa, sw_index);
744
745 fail_rx_qcreate:
746         sfc_ev_qstop(evq);
747
748 fail_ev_qstart:
749         return rc;
750 }
751
752 void
753 sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
754 {
755         struct sfc_rxq_info *rxq_info;
756         struct sfc_rxq *rxq;
757
758         sfc_log_init(sa, "sw_index=%u", sw_index);
759
760         SFC_ASSERT(sw_index < sa->rxq_count);
761
762         rxq_info = &sa->rxq_info[sw_index];
763         rxq = rxq_info->rxq;
764
765         if (rxq->state == SFC_RXQ_INITIALIZED)
766                 return;
767         SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
768
769         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
770         sa->eth_dev->data->rx_queue_state[sw_index] =
771                 RTE_ETH_QUEUE_STATE_STOPPED;
772
773         sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
774
775         if (sw_index == 0)
776                 efx_mac_filter_default_rxq_clear(sa->nic);
777
778         sfc_rx_qflush(sa, sw_index);
779
780         rxq->state = SFC_RXQ_INITIALIZED;
781
782         efx_rx_qdestroy(rxq->common);
783
784         sfc_ev_qstop(rxq->evq);
785 }
786
787 static int
788 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
789                    const struct rte_eth_rxconf *rx_conf)
790 {
791         int rc = 0;
792
793         if (rx_conf->rx_thresh.pthresh != 0 ||
794             rx_conf->rx_thresh.hthresh != 0 ||
795             rx_conf->rx_thresh.wthresh != 0) {
796                 sfc_warn(sa,
797                         "RxQ prefetch/host/writeback thresholds are not supported");
798         }
799
800         if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
801                 sfc_err(sa,
802                         "RxQ free threshold too large: %u vs maximum %u",
803                         rx_conf->rx_free_thresh, rxq_max_fill_level);
804                 rc = EINVAL;
805         }
806
807         if (rx_conf->rx_drop_en == 0) {
808                 sfc_err(sa, "RxQ drop disable is not supported");
809                 rc = EINVAL;
810         }
811
812         return rc;
813 }
814
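/*
 * Estimate the alignment guaranteed for the start of mbuf data, limited by
 * the cache line alignment of the mbuf object and by the data offset
 * within it.
 */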
815 static unsigned int
816 sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
817 {
818         uint32_t data_off;
819         uint32_t order;
820
821         /* The mbuf object itself is always cache line aligned */
822         order = rte_bsf32(RTE_CACHE_LINE_SIZE);
823
824         /* Data offset from mbuf object start */
825         data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
826                 RTE_PKTMBUF_HEADROOM;
827
828         order = MIN(order, rte_bsf32(data_off));
829
830         return 1u << (order - 1);
831 }
832
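/*
 * Calculate the Rx buffer size usable with the given mempool: take the data
 * room size and subtract the headroom and the space required to satisfy the
 * NIC buffer start and end padding alignment constraints. Returns 0 if the
 * mempool objects are too small.
 */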
833 static uint16_t
834 sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
835 {
836         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
837         const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
838         const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
839         uint16_t buf_size;
840         unsigned int buf_aligned;
841         unsigned int start_alignment;
842         unsigned int end_padding_alignment;
843
844         /* Below it is assumed that both alignments are powers of 2 */
845         SFC_ASSERT(rte_is_power_of_2(nic_align_start));
846         SFC_ASSERT(rte_is_power_of_2(nic_align_end));
847
848         /*
849          * mbuf is always cache line aligned, double-check
850          * that it meets rx buffer start alignment requirements.
851          */
852
853         /* Start from mbuf pool data room size */
854         buf_size = rte_pktmbuf_data_room_size(mb_pool);
855
856         /* Remove headroom */
857         if (buf_size <= RTE_PKTMBUF_HEADROOM) {
858                 sfc_err(sa,
859                         "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
860                         mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
861                 return 0;
862         }
863         buf_size -= RTE_PKTMBUF_HEADROOM;
864
865         /* Calculate guaranteed data start alignment */
866         buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
867
868         /* Reserve space for start alignment */
869         if (buf_aligned < nic_align_start) {
870                 start_alignment = nic_align_start - buf_aligned;
871                 if (buf_size <= start_alignment) {
872                         sfc_err(sa,
873                                 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
874                                 mb_pool->name,
875                                 rte_pktmbuf_data_room_size(mb_pool),
876                                 RTE_PKTMBUF_HEADROOM, start_alignment);
877                         return 0;
878                 }
879                 buf_aligned = nic_align_start;
880                 buf_size -= start_alignment;
881         } else {
882                 start_alignment = 0;
883         }
884
885         /* Make sure that end padding does not write beyond the buffer */
886         if (buf_aligned < nic_align_end) {
887                 /*
888                  * Estimate the space which can be lost. If the guaranteed
889                  * buffer size is odd, the lost space is (nic_align_end - 1).
890                  * A more accurate formula is used below.
891                  */
892                 end_padding_alignment = nic_align_end -
893                         MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
894                 if (buf_size <= end_padding_alignment) {
895                         sfc_err(sa,
896                                 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
897                                 mb_pool->name,
898                                 rte_pktmbuf_data_room_size(mb_pool),
899                                 RTE_PKTMBUF_HEADROOM, start_alignment,
900                                 end_padding_alignment);
901                         return 0;
902                 }
903                 buf_size -= end_padding_alignment;
904         } else {
905                 /*
906                  * Start is aligned the same or better than end,
907                  * just align length.
908                  */
909                 buf_size = P2ALIGN(buf_size, nic_align_end);
910         }
911
912         return buf_size;
913 }
914
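/*
 * Initialize an Rx queue: size up the rings via the datapath callback,
 * check the configuration, calculate the Rx buffer size and allocate the
 * event queue, the control path queue, the DMA memory and the datapath
 * queue.
 */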
915 int
916 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
917              uint16_t nb_rx_desc, unsigned int socket_id,
918              const struct rte_eth_rxconf *rx_conf,
919              struct rte_mempool *mb_pool)
920 {
921         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
922         int rc;
923         unsigned int rxq_entries;
924         unsigned int evq_entries;
925         unsigned int rxq_max_fill_level;
926         uint16_t buf_size;
927         struct sfc_rxq_info *rxq_info;
928         struct sfc_evq *evq;
929         struct sfc_rxq *rxq;
930         struct sfc_dp_rx_qcreate_info info;
931
932         rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
933                                        &rxq_max_fill_level);
934         if (rc != 0)
935                 goto fail_size_up_rings;
936
937         rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
938         if (rc != 0)
939                 goto fail_bad_conf;
940
941         buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
942         if (buf_size == 0) {
943                 sfc_err(sa, "RxQ %u mbuf pool object size is too small",
944                         sw_index);
945                 rc = EINVAL;
946                 goto fail_bad_conf;
947         }
948
949         if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
950             !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
951                 sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
952                         "object size is too small", sw_index);
953                 sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
954                         "PDU size %u plus Rx prefix %u bytes",
955                         sw_index, buf_size, (unsigned int)sa->port.pdu,
956                         encp->enc_rx_prefix_size);
957                 rc = EINVAL;
958                 goto fail_bad_conf;
959         }
960
961         SFC_ASSERT(sw_index < sa->rxq_count);
962         rxq_info = &sa->rxq_info[sw_index];
963
964         SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
965         rxq_info->entries = rxq_entries;
966         rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
967         rxq_info->type_flags =
968                 sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
969                 EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
970
971         if ((encp->enc_tunnel_encapsulations_supported != 0) &&
972             (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
973                 rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
974
975         rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
976                           evq_entries, socket_id, &evq);
977         if (rc != 0)
978                 goto fail_ev_qinit;
979
980         rc = ENOMEM;
981         rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
982                                  socket_id);
983         if (rxq == NULL)
984                 goto fail_rxq_alloc;
985
986         rxq_info->rxq = rxq;
987
988         rxq->evq = evq;
989         rxq->hw_index = sw_index;
990         rxq->refill_threshold =
991                 RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
992         rxq->refill_mb_pool = mb_pool;
993
994         rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
995                            socket_id, &rxq->mem);
996         if (rc != 0)
997                 goto fail_dma_alloc;
998
999         memset(&info, 0, sizeof(info));
1000         info.refill_mb_pool = rxq->refill_mb_pool;
1001         info.max_fill_level = rxq_max_fill_level;
1002         info.refill_threshold = rxq->refill_threshold;
1003         info.buf_size = buf_size;
1004         info.batch_max = encp->enc_rx_batch_max;
1005         info.prefix_size = encp->enc_rx_prefix_size;
1006
1007 #if EFSYS_OPT_RX_SCALE
1008         if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
1009                 info.flags |= SFC_RXQ_FLAG_RSS_HASH;
1010 #endif
1011
1012         info.rxq_entries = rxq_info->entries;
1013         info.rxq_hw_ring = rxq->mem.esm_base;
1014         info.evq_entries = evq_entries;
1015         info.evq_hw_ring = evq->mem.esm_base;
1016         info.hw_index = rxq->hw_index;
1017         info.mem_bar = sa->mem_bar.esb_base;
1018
1019         rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
1020                                 &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
1021                                 socket_id, &info, &rxq->dp);
1022         if (rc != 0)
1023                 goto fail_dp_rx_qcreate;
1024
1025         evq->dp_rxq = rxq->dp;
1026
1027         rxq->state = SFC_RXQ_INITIALIZED;
1028
1029         rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
1030
1031         return 0;
1032
1033 fail_dp_rx_qcreate:
1034         sfc_dma_free(sa, &rxq->mem);
1035
1036 fail_dma_alloc:
1037         rxq_info->rxq = NULL;
1038         rte_free(rxq);
1039
1040 fail_rxq_alloc:
1041         sfc_ev_qfini(evq);
1042
1043 fail_ev_qinit:
1044         rxq_info->entries = 0;
1045
1046 fail_bad_conf:
1047 fail_size_up_rings:
1048         sfc_log_init(sa, "failed %d", rc);
1049         return rc;
1050 }
1051
1052 void
1053 sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
1054 {
1055         struct sfc_rxq_info *rxq_info;
1056         struct sfc_rxq *rxq;
1057
1058         SFC_ASSERT(sw_index < sa->rxq_count);
1059
1060         rxq_info = &sa->rxq_info[sw_index];
1061
1062         rxq = rxq_info->rxq;
1063         SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
1064
1065         sa->dp_rx->qdestroy(rxq->dp);
1066         rxq->dp = NULL;
1067
1068         rxq_info->rxq = NULL;
1069         rxq_info->entries = 0;
1070
1071         sfc_dma_free(sa, &rxq->mem);
1072
1073         sfc_ev_qfini(rxq->evq);
1074         rxq->evq = NULL;
1075
1076         rte_free(rxq);
1077 }
1078
1079 #if EFSYS_OPT_RX_SCALE
1080 efx_rx_hash_type_t
1081 sfc_rte_to_efx_hash_type(uint64_t rss_hf)
1082 {
1083         efx_rx_hash_type_t efx_hash_types = 0;
1084
1085         if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
1086                        ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
1087                 efx_hash_types |= EFX_RX_HASH_IPV4;
1088
1089         if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1090                 efx_hash_types |= EFX_RX_HASH_TCPIPV4;
1091
1092         if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
1093                         ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
1094                 efx_hash_types |= EFX_RX_HASH_IPV6;
1095
1096         if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
1097                 efx_hash_types |= EFX_RX_HASH_TCPIPV6;
1098
1099         return efx_hash_types;
1100 }
1101
1102 uint64_t
1103 sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
1104 {
1105         uint64_t rss_hf = 0;
1106
1107         if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
1108                 rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
1109                            ETH_RSS_NONFRAG_IPV4_OTHER);
1110
1111         if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
1112                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1113
1114         if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
1115                 rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
1116                            ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
1117
1118         if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
1119                 rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
1120
1121         return rss_hf;
1122 }
1123 #endif
1124
1125 #if EFSYS_OPT_RX_SCALE
1126 static int
1127 sfc_rx_rss_config(struct sfc_adapter *sa)
1128 {
1129         int rc = 0;
1130
1131         if (sa->rss_channels > 0) {
1132                 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1133                                            EFX_RX_HASHALG_TOEPLITZ,
1134                                            sa->rss_hash_types, B_TRUE);
1135                 if (rc != 0)
1136                         goto finish;
1137
1138                 rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1139                                           sa->rss_key,
1140                                           sizeof(sa->rss_key));
1141                 if (rc != 0)
1142                         goto finish;
1143
1144                 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1145                                           sa->rss_tbl, RTE_DIM(sa->rss_tbl));
1146         }
1147
1148 finish:
1149         return rc;
1150 }
1151 #else
1152 static int
1153 sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
1154 {
1155         return 0;
1156 }
1157 #endif
1158
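/*
 * Start the Rx subsystem: initialize Rx on the NIC, apply the RSS
 * configuration and start all Rx queues which do not have deferred start
 * requested (or were started explicitly before).
 */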
1159 int
1160 sfc_rx_start(struct sfc_adapter *sa)
1161 {
1162         unsigned int sw_index;
1163         int rc;
1164
1165         sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
1166
1167         rc = efx_rx_init(sa->nic);
1168         if (rc != 0)
1169                 goto fail_rx_init;
1170
1171         rc = sfc_rx_rss_config(sa);
1172         if (rc != 0)
1173                 goto fail_rss_config;
1174
1175         for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
1176                 if ((!sa->rxq_info[sw_index].deferred_start ||
1177                      sa->rxq_info[sw_index].deferred_started)) {
1178                         rc = sfc_rx_qstart(sa, sw_index);
1179                         if (rc != 0)
1180                                 goto fail_rx_qstart;
1181                 }
1182         }
1183
1184         return 0;
1185
1186 fail_rx_qstart:
1187         while (sw_index-- > 0)
1188                 sfc_rx_qstop(sa, sw_index);
1189
1190 fail_rss_config:
1191         efx_rx_fini(sa->nic);
1192
1193 fail_rx_init:
1194         sfc_log_init(sa, "failed %d", rc);
1195         return rc;
1196 }
1197
1198 void
1199 sfc_rx_stop(struct sfc_adapter *sa)
1200 {
1201         unsigned int sw_index;
1202
1203         sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
1204
1205         sw_index = sa->rxq_count;
1206         while (sw_index-- > 0) {
1207                 if (sa->rxq_info[sw_index].rxq != NULL)
1208                         sfc_rx_qstop(sa, sw_index);
1209         }
1210
1211         efx_rx_fini(sa->nic);
1212 }
1213
1214 static int
1215 sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
1216 {
1217         struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
1218         unsigned int max_entries;
1219
1220         max_entries = EFX_RXQ_MAXNDESCS;
1221         SFC_ASSERT(rte_is_power_of_2(max_entries));
1222
1223         rxq_info->max_entries = max_entries;
1224
1225         return 0;
1226 }
1227
1228 static int
1229 sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
1230 {
1231         int rc = 0;
1232
1233         switch (rxmode->mq_mode) {
1234         case ETH_MQ_RX_NONE:
1235                 /* No special checks are required */
1236                 break;
1237 #if EFSYS_OPT_RX_SCALE
1238         case ETH_MQ_RX_RSS:
1239                 if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
1240                         sfc_err(sa, "RSS is not available");
1241                         rc = EINVAL;
1242                 }
1243                 break;
1244 #endif
1245         default:
1246                 sfc_err(sa, "Rx multi-queue mode %u not supported",
1247                         rxmode->mq_mode);
1248                 rc = EINVAL;
1249         }
1250
1251         if (rxmode->header_split) {
1252                 sfc_err(sa, "Header split on Rx not supported");
1253                 rc = EINVAL;
1254         }
1255
1256         if (rxmode->hw_vlan_filter) {
1257                 sfc_err(sa, "HW VLAN filtering not supported");
1258                 rc = EINVAL;
1259         }
1260
1261         if (rxmode->hw_vlan_strip) {
1262                 sfc_err(sa, "HW VLAN stripping not supported");
1263                 rc = EINVAL;
1264         }
1265
1266         if (rxmode->hw_vlan_extend) {
1267                 sfc_err(sa,
1268                         "Q-in-Q HW VLAN stripping not supported");
1269                 rc = EINVAL;
1270         }
1271
1272         if (!rxmode->hw_strip_crc) {
1273                 sfc_warn(sa,
1274                          "FCS stripping control not supported - always stripped");
1275                 rxmode->hw_strip_crc = 1;
1276         }
1277
1278         if (rxmode->enable_scatter &&
1279             (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
1280                 sfc_err(sa, "Rx scatter not supported by %s datapath",
1281                         sa->dp_rx->dp.name);
1282                 rc = EINVAL;
1283         }
1284
1285         if (rxmode->enable_lro) {
1286                 sfc_err(sa, "LRO not supported");
1287                 rc = EINVAL;
1288         }
1289
1290         return rc;
1291 }
1292
1293 /**
1294  * Destroy excess queues that are no longer needed after reconfiguration
1295  * or complete close.
1296  */
1297 static void
1298 sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
1299 {
1300         int sw_index;
1301
1302         SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
1303
1304         sw_index = sa->rxq_count;
1305         while (--sw_index >= (int)nb_rx_queues) {
1306                 if (sa->rxq_info[sw_index].rxq != NULL)
1307                         sfc_rx_qfini(sa, sw_index);
1308         }
1309
1310         sa->rxq_count = nb_rx_queues;
1311 }
1312
1313 /**
1314  * Initialize Rx subsystem.
1315  *
1316  * Called at device (re)configuration stage when number of receive queues is
1317  * specified together with other device level receive configuration.
1318  *
1319  * It should be used to allocate NUMA-unaware resources.
1320  */
1321 int
1322 sfc_rx_configure(struct sfc_adapter *sa)
1323 {
1324         struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
1325         const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
1326         int rc;
1327
1328         sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
1329                      nb_rx_queues, sa->rxq_count);
1330
1331         rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
1332         if (rc != 0)
1333                 goto fail_check_mode;
1334
1335         if (nb_rx_queues == sa->rxq_count)
1336                 goto done;
1337
1338         if (sa->rxq_info == NULL) {
1339                 rc = ENOMEM;
1340                 sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
1341                                                  sizeof(sa->rxq_info[0]), 0,
1342                                                  sa->socket_id);
1343                 if (sa->rxq_info == NULL)
1344                         goto fail_rxqs_alloc;
1345         } else {
1346                 struct sfc_rxq_info *new_rxq_info;
1347
1348                 if (nb_rx_queues < sa->rxq_count)
1349                         sfc_rx_fini_queues(sa, nb_rx_queues);
1350
1351                 rc = ENOMEM;
1352                 new_rxq_info =
1353                         rte_realloc(sa->rxq_info,
1354                                     nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
1355                 if (new_rxq_info == NULL && nb_rx_queues > 0)
1356                         goto fail_rxqs_realloc;
1357
1358                 sa->rxq_info = new_rxq_info;
1359                 if (nb_rx_queues > sa->rxq_count)
1360                         memset(&sa->rxq_info[sa->rxq_count], 0,
1361                                (nb_rx_queues - sa->rxq_count) *
1362                                sizeof(sa->rxq_info[0]));
1363         }
1364
1365         while (sa->rxq_count < nb_rx_queues) {
1366                 rc = sfc_rx_qinit_info(sa, sa->rxq_count);
1367                 if (rc != 0)
1368                         goto fail_rx_qinit_info;
1369
1370                 sa->rxq_count++;
1371         }
1372
1373 #if EFSYS_OPT_RX_SCALE
1374         sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
1375                            MIN(sa->rxq_count, EFX_MAXRSS) : 0;
1376
1377         if (sa->rss_channels > 0) {
1378                 unsigned int sw_index;
1379
1380                 for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
1381                         sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
1382         }
1383 #endif
1384
1385 done:
1386         return 0;
1387
1388 fail_rx_qinit_info:
1389 fail_rxqs_realloc:
1390 fail_rxqs_alloc:
1391         sfc_rx_close(sa);
1392
1393 fail_check_mode:
1394         sfc_log_init(sa, "failed %d", rc);
1395         return rc;
1396 }
1397
1398 /**
1399  * Shutdown Rx subsystem.
1400  *
1401  * Called at device close stage, for example, before device shutdown.
1402  */
1403 void
1404 sfc_rx_close(struct sfc_adapter *sa)
1405 {
1406         sfc_rx_fini_queues(sa, 0);
1407
1408         sa->rss_channels = 0;
1409
1410         rte_free(sa->rxq_info);
1411         sa->rxq_info = NULL;
1412 }