net/sfc: allow one Rx queue entry carry many packet buffers
[dpdk.git] / drivers / net / sfc / sfc_rx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2016-2018 Solarflare Communications Inc.
4  * All rights reserved.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <rte_mempool.h>
11
12 #include "efx.h"
13
14 #include "sfc.h"
15 #include "sfc_debug.h"
16 #include "sfc_log.h"
17 #include "sfc_ev.h"
18 #include "sfc_rx.h"
19 #include "sfc_kvargs.h"
20 #include "sfc_tweak.h"
21
22 /*
23  * Maximum number of Rx queue flush attempt in the case of failure or
24  * flush timeout
25  */
26 #define SFC_RX_QFLUSH_ATTEMPTS          (3)
27
28 /*
29  * Time to wait between event queue polling attempts when waiting for Rx
30  * queue flush done or failed events.
31  */
32 #define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)
33
34 /*
35  * Maximum number of event queue polling attempts when waiting for Rx queue
36  * flush done or failed events. It defines Rx queue flush attempt timeout
37  * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
38  */
39 #define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)
40
41 void
42 sfc_rx_qflush_done(struct sfc_rxq *rxq)
43 {
44         rxq->state |= SFC_RXQ_FLUSHED;
45         rxq->state &= ~SFC_RXQ_FLUSHING;
46 }
47
48 void
49 sfc_rx_qflush_failed(struct sfc_rxq *rxq)
50 {
51         rxq->state |= SFC_RXQ_FLUSH_FAILED;
52         rxq->state &= ~SFC_RXQ_FLUSHING;
53 }
54
55 static void
56 sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
57 {
58         unsigned int free_space;
59         unsigned int bulks;
60         void *objs[SFC_RX_REFILL_BULK];
61         efsys_dma_addr_t addr[RTE_DIM(objs)];
62         unsigned int added = rxq->added;
63         unsigned int id;
64         unsigned int i;
65         struct sfc_efx_rx_sw_desc *rxd;
66         struct rte_mbuf *m;
67         uint16_t port_id = rxq->dp.dpq.port_id;
68
69         free_space = rxq->max_fill_level - (added - rxq->completed);
70
71         if (free_space < rxq->refill_threshold)
72                 return;
73
74         bulks = free_space / RTE_DIM(objs);
75         /* refill_threshold guarantees that bulks is positive */
76         SFC_ASSERT(bulks > 0);
77
78         id = added & rxq->ptr_mask;
79         do {
80                 if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
81                                                   RTE_DIM(objs)) < 0)) {
82                         /*
83                          * It is hardly a safe way to increment counter
84                          * from different contexts, but all PMDs do it.
85                          */
86                         rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
87                                 RTE_DIM(objs);
88                         /* Return if we have posted nothing yet */
89                         if (added == rxq->added)
90                                 return;
91                         /* Push posted */
92                         break;
93                 }
94
95                 for (i = 0; i < RTE_DIM(objs);
96                      ++i, id = (id + 1) & rxq->ptr_mask) {
97                         m = objs[i];
98
99                         rxd = &rxq->sw_desc[id];
100                         rxd->mbuf = m;
101
102                         SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
103                         m->data_off = RTE_PKTMBUF_HEADROOM;
104                         SFC_ASSERT(m->next == NULL);
105                         SFC_ASSERT(m->nb_segs == 1);
106                         m->port = port_id;
107
108                         addr[i] = rte_pktmbuf_iova(m);
109                 }
110
111                 efx_rx_qpost(rxq->common, addr, rxq->buf_size,
112                              RTE_DIM(objs), rxq->completed, added);
113                 added += RTE_DIM(objs);
114         } while (--bulks > 0);
115
116         SFC_ASSERT(added != rxq->added);
117         rxq->added = added;
118         efx_rx_qpush(rxq->common, added, &rxq->pushed);
119 }
120
121 static uint64_t
122 sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
123 {
124         uint64_t mbuf_flags = 0;
125
126         switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
127         case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
128                 mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
129                 break;
130         case EFX_PKT_IPV4:
131                 mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
132                 break;
133         default:
134                 RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
135                 SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
136                            PKT_RX_IP_CKSUM_UNKNOWN);
137                 break;
138         }
139
140         switch ((desc_flags &
141                  (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
142         case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
143         case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
144                 mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
145                 break;
146         case EFX_PKT_TCP:
147         case EFX_PKT_UDP:
148                 mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
149                 break;
150         default:
151                 RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
152                 SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
153                            PKT_RX_L4_CKSUM_UNKNOWN);
154                 break;
155         }
156
157         return mbuf_flags;
158 }
159
160 static uint32_t
161 sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
162 {
163         return RTE_PTYPE_L2_ETHER |
164                 ((desc_flags & EFX_PKT_IPV4) ?
165                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
166                 ((desc_flags & EFX_PKT_IPV6) ?
167                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
168                 ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
169                 ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
170 }
171
172 static const uint32_t *
173 sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
174 {
175         static const uint32_t ptypes[] = {
176                 RTE_PTYPE_L2_ETHER,
177                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
178                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
179                 RTE_PTYPE_L4_TCP,
180                 RTE_PTYPE_L4_UDP,
181                 RTE_PTYPE_UNKNOWN
182         };
183
184         return ptypes;
185 }
186
187 static void
188 sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
189                         struct rte_mbuf *m)
190 {
191         uint8_t *mbuf_data;
192
193
194         if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
195                 return;
196
197         mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
198
199         if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
200                 m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
201                                                       EFX_RX_HASHALG_TOEPLITZ,
202                                                       mbuf_data);
203
204                 m->ol_flags |= PKT_RX_RSS_HASH;
205         }
206 }
207
208 static uint16_t
209 sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
210 {
211         struct sfc_dp_rxq *dp_rxq = rx_queue;
212         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
213         unsigned int completed;
214         unsigned int prefix_size = rxq->prefix_size;
215         unsigned int done_pkts = 0;
216         boolean_t discard_next = B_FALSE;
217         struct rte_mbuf *scatter_pkt = NULL;
218
219         if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
220                 return 0;
221
222         sfc_ev_qpoll(rxq->evq);
223
224         completed = rxq->completed;
225         while (completed != rxq->pending && done_pkts < nb_pkts) {
226                 unsigned int id;
227                 struct sfc_efx_rx_sw_desc *rxd;
228                 struct rte_mbuf *m;
229                 unsigned int seg_len;
230                 unsigned int desc_flags;
231
232                 id = completed++ & rxq->ptr_mask;
233                 rxd = &rxq->sw_desc[id];
234                 m = rxd->mbuf;
235                 desc_flags = rxd->flags;
236
237                 if (discard_next)
238                         goto discard;
239
240                 if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
241                         goto discard;
242
243                 if (desc_flags & EFX_PKT_PREFIX_LEN) {
244                         uint16_t tmp_size;
245                         int rc __rte_unused;
246
247                         rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
248                                 rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
249                         SFC_ASSERT(rc == 0);
250                         seg_len = tmp_size;
251                 } else {
252                         seg_len = rxd->size - prefix_size;
253                 }
254
255                 rte_pktmbuf_data_len(m) = seg_len;
256                 rte_pktmbuf_pkt_len(m) = seg_len;
257
258                 if (scatter_pkt != NULL) {
259                         if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
260                                 rte_pktmbuf_free(scatter_pkt);
261                                 goto discard;
262                         }
263                         /* The packet to deliver */
264                         m = scatter_pkt;
265                 }
266
267                 if (desc_flags & EFX_PKT_CONT) {
268                         /* The packet is scattered, more fragments to come */
269                         scatter_pkt = m;
270                         /* Further fragments have no prefix */
271                         prefix_size = 0;
272                         continue;
273                 }
274
275                 /* Scattered packet is done */
276                 scatter_pkt = NULL;
277                 /* The first fragment of the packet has prefix */
278                 prefix_size = rxq->prefix_size;
279
280                 m->ol_flags =
281                         sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
282                 m->packet_type =
283                         sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
284
285                 /*
286                  * Extract RSS hash from the packet prefix and
287                  * set the corresponding field (if needed and possible)
288                  */
289                 sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
290
291                 m->data_off += prefix_size;
292
293                 *rx_pkts++ = m;
294                 done_pkts++;
295                 continue;
296
297 discard:
298                 discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
299                 rte_mempool_put(rxq->refill_mb_pool, m);
300                 rxd->mbuf = NULL;
301         }
302
303         /* pending is only moved when entire packet is received */
304         SFC_ASSERT(scatter_pkt == NULL);
305
306         rxq->completed = completed;
307
308         sfc_efx_rx_qrefill(rxq);
309
310         return done_pkts;
311 }
312
313 static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
314 static unsigned int
315 sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
316 {
317         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
318
319         if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
320                 return 0;
321
322         sfc_ev_qpoll(rxq->evq);
323
324         return rxq->pending - rxq->completed;
325 }
326
327 static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
328 static int
329 sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
330 {
331         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
332
333         if (unlikely(offset > rxq->ptr_mask))
334                 return -EINVAL;
335
336         /*
337          * Poll EvQ to derive up-to-date 'rxq->pending' figure;
338          * it is required for the queue to be running, but the
339          * check is omitted because API design assumes that it
340          * is the duty of the caller to satisfy all conditions
341          */
342         SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
343                    SFC_EFX_RXQ_FLAG_RUNNING);
344         sfc_ev_qpoll(rxq->evq);
345
346         /*
347          * There is a handful of reserved entries in the ring,
348          * but an explicit check whether the offset points to
349          * a reserved entry is neglected since the two checks
350          * below rely on the figures which take the HW limits
351          * into account and thus if an entry is reserved, the
352          * checks will fail and UNAVAIL code will be returned
353          */
354
355         if (offset < (rxq->pending - rxq->completed))
356                 return RTE_ETH_RX_DESC_DONE;
357
358         if (offset < (rxq->added - rxq->completed))
359                 return RTE_ETH_RX_DESC_AVAIL;
360
361         return RTE_ETH_RX_DESC_UNAVAIL;
362 }
363
364 struct sfc_rxq *
365 sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
366 {
367         const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
368         struct rte_eth_dev *eth_dev;
369         struct sfc_adapter *sa;
370         struct sfc_rxq *rxq;
371
372         SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
373         eth_dev = &rte_eth_devices[dpq->port_id];
374
375         sa = eth_dev->data->dev_private;
376
377         SFC_ASSERT(dpq->queue_id < sa->rxq_count);
378         rxq = sa->rxq_info[dpq->queue_id].rxq;
379
380         SFC_ASSERT(rxq != NULL);
381         return rxq;
382 }
383
384 static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
385 static int
386 sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
387                           unsigned int *rxq_entries,
388                           unsigned int *evq_entries,
389                           unsigned int *rxq_max_fill_level)
390 {
391         *rxq_entries = nb_rx_desc;
392         *evq_entries = nb_rx_desc;
393         *rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
394         return 0;
395 }
396
397 static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
398 static int
399 sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
400                    const struct rte_pci_addr *pci_addr, int socket_id,
401                    const struct sfc_dp_rx_qcreate_info *info,
402                    struct sfc_dp_rxq **dp_rxqp)
403 {
404         struct sfc_efx_rxq *rxq;
405         int rc;
406
407         rc = ENOMEM;
408         rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
409                                  RTE_CACHE_LINE_SIZE, socket_id);
410         if (rxq == NULL)
411                 goto fail_rxq_alloc;
412
413         sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
414
415         rc = ENOMEM;
416         rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
417                                          info->rxq_entries,
418                                          sizeof(*rxq->sw_desc),
419                                          RTE_CACHE_LINE_SIZE, socket_id);
420         if (rxq->sw_desc == NULL)
421                 goto fail_desc_alloc;
422
423         /* efx datapath is bound to efx control path */
424         rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
425         if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
426                 rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
427         rxq->ptr_mask = info->rxq_entries - 1;
428         rxq->batch_max = info->batch_max;
429         rxq->prefix_size = info->prefix_size;
430         rxq->max_fill_level = info->max_fill_level;
431         rxq->refill_threshold = info->refill_threshold;
432         rxq->buf_size = info->buf_size;
433         rxq->refill_mb_pool = info->refill_mb_pool;
434
435         *dp_rxqp = &rxq->dp;
436         return 0;
437
438 fail_desc_alloc:
439         rte_free(rxq);
440
441 fail_rxq_alloc:
442         return rc;
443 }
444
445 static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
446 static void
447 sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
448 {
449         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
450
451         rte_free(rxq->sw_desc);
452         rte_free(rxq);
453 }
454
455 static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
456 static int
457 sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
458                   __rte_unused unsigned int evq_read_ptr)
459 {
460         /* libefx-based datapath is specific to libefx-based PMD */
461         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
462         struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
463
464         rxq->common = crxq->common;
465
466         rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
467
468         sfc_efx_rx_qrefill(rxq);
469
470         rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
471
472         return 0;
473 }
474
475 static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
476 static void
477 sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
478                  __rte_unused unsigned int *evq_read_ptr)
479 {
480         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
481
482         rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
483
484         /* libefx-based datapath is bound to libefx-based PMD and uses
485          * event queue structure directly. So, there is no necessity to
486          * return EvQ read pointer.
487          */
488 }
489
490 static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
491 static void
492 sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
493 {
494         struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
495         unsigned int i;
496         struct sfc_efx_rx_sw_desc *rxd;
497
498         for (i = rxq->completed; i != rxq->added; ++i) {
499                 rxd = &rxq->sw_desc[i & rxq->ptr_mask];
500                 rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
501                 rxd->mbuf = NULL;
502                 /* Packed stream relies on 0 in inactive SW desc.
503                  * Rx queue stop is not performance critical, so
504                  * there is no harm to do it always.
505                  */
506                 rxd->flags = 0;
507                 rxd->size = 0;
508         }
509
510         rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
511 }
512
513 struct sfc_dp_rx sfc_efx_rx = {
514         .dp = {
515                 .name           = SFC_KVARG_DATAPATH_EFX,
516                 .type           = SFC_DP_RX,
517                 .hw_fw_caps     = 0,
518         },
519         .features               = SFC_DP_RX_FEAT_SCATTER,
520         .qsize_up_rings         = sfc_efx_rx_qsize_up_rings,
521         .qcreate                = sfc_efx_rx_qcreate,
522         .qdestroy               = sfc_efx_rx_qdestroy,
523         .qstart                 = sfc_efx_rx_qstart,
524         .qstop                  = sfc_efx_rx_qstop,
525         .qpurge                 = sfc_efx_rx_qpurge,
526         .supported_ptypes_get   = sfc_efx_supported_ptypes_get,
527         .qdesc_npending         = sfc_efx_rx_qdesc_npending,
528         .qdesc_status           = sfc_efx_rx_qdesc_status,
529         .pkt_burst              = sfc_efx_recv_pkts,
530 };
531
532 unsigned int
533 sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
534 {
535         struct sfc_rxq *rxq;
536
537         SFC_ASSERT(sw_index < sa->rxq_count);
538         rxq = sa->rxq_info[sw_index].rxq;
539
540         if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
541                 return 0;
542
543         return sa->dp_rx->qdesc_npending(rxq->dp);
544 }
545
546 int
547 sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
548 {
549         struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
550
551         return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
552 }
553
554 static void
555 sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
556 {
557         struct sfc_rxq *rxq;
558         unsigned int retry_count;
559         unsigned int wait_count;
560         int rc;
561
562         rxq = sa->rxq_info[sw_index].rxq;
563         SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
564
565         /*
566          * Retry Rx queue flushing in the case of flush failed or
567          * timeout. In the worst case it can delay for 6 seconds.
568          */
569         for (retry_count = 0;
570              ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
571              (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
572              ++retry_count) {
573                 rc = efx_rx_qflush(rxq->common);
574                 if (rc != 0) {
575                         rxq->state |= (rc == EALREADY) ?
576                                 SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
577                         break;
578                 }
579                 rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
580                 rxq->state |= SFC_RXQ_FLUSHING;
581
582                 /*
583                  * Wait for Rx queue flush done or failed event at least
584                  * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
585                  * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
586                  * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
587                  */
588                 wait_count = 0;
589                 do {
590                         rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
591                         sfc_ev_qpoll(rxq->evq);
592                 } while ((rxq->state & SFC_RXQ_FLUSHING) &&
593                          (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
594
595                 if (rxq->state & SFC_RXQ_FLUSHING)
596                         sfc_err(sa, "RxQ %u flush timed out", sw_index);
597
598                 if (rxq->state & SFC_RXQ_FLUSH_FAILED)
599                         sfc_err(sa, "RxQ %u flush failed", sw_index);
600
601                 if (rxq->state & SFC_RXQ_FLUSHED)
602                         sfc_notice(sa, "RxQ %u flushed", sw_index);
603         }
604
605         sa->dp_rx->qpurge(rxq->dp);
606 }
607
608 static int
609 sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
610 {
611         struct sfc_rss *rss = &sa->rss;
612         boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
613         struct sfc_port *port = &sa->port;
614         int rc;
615
616         /*
617          * If promiscuous or all-multicast mode has been requested, setting
618          * filter for the default Rx queue might fail, in particular, while
619          * running over PCI function which is not a member of corresponding
620          * privilege groups; if this occurs, few iterations will be made to
621          * repeat this step without promiscuous and all-multicast flags set
622          */
623 retry:
624         rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
625         if (rc == 0)
626                 return 0;
627         else if (rc != EOPNOTSUPP)
628                 return rc;
629
630         if (port->promisc) {
631                 sfc_warn(sa, "promiscuous mode has been requested, "
632                              "but the HW rejects it");
633                 sfc_warn(sa, "promiscuous mode will be disabled");
634
635                 port->promisc = B_FALSE;
636                 rc = sfc_set_rx_mode(sa);
637                 if (rc != 0)
638                         return rc;
639
640                 goto retry;
641         }
642
643         if (port->allmulti) {
644                 sfc_warn(sa, "all-multicast mode has been requested, "
645                              "but the HW rejects it");
646                 sfc_warn(sa, "all-multicast mode will be disabled");
647
648                 port->allmulti = B_FALSE;
649                 rc = sfc_set_rx_mode(sa);
650                 if (rc != 0)
651                         return rc;
652
653                 goto retry;
654         }
655
656         return rc;
657 }
658
659 int
660 sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
661 {
662         struct sfc_port *port = &sa->port;
663         struct sfc_rxq_info *rxq_info;
664         struct sfc_rxq *rxq;
665         struct sfc_evq *evq;
666         int rc;
667
668         sfc_log_init(sa, "sw_index=%u", sw_index);
669
670         SFC_ASSERT(sw_index < sa->rxq_count);
671
672         rxq_info = &sa->rxq_info[sw_index];
673         rxq = rxq_info->rxq;
674         SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
675
676         evq = rxq->evq;
677
678         rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
679         if (rc != 0)
680                 goto fail_ev_qstart;
681
682         rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
683                             &rxq->mem, rxq_info->entries,
684                             0 /* not used on EF10 */, rxq_info->type_flags,
685                             evq->common, &rxq->common);
686         if (rc != 0)
687                 goto fail_rx_qcreate;
688
689         efx_rx_qenable(rxq->common);
690
691         rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
692         if (rc != 0)
693                 goto fail_dp_qstart;
694
695         rxq->state |= SFC_RXQ_STARTED;
696
697         if ((sw_index == 0) && !port->isolated) {
698                 rc = sfc_rx_default_rxq_set_filter(sa, rxq);
699                 if (rc != 0)
700                         goto fail_mac_filter_default_rxq_set;
701         }
702
703         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
704         sa->eth_dev->data->rx_queue_state[sw_index] =
705                 RTE_ETH_QUEUE_STATE_STARTED;
706
707         return 0;
708
709 fail_mac_filter_default_rxq_set:
710         sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
711
712 fail_dp_qstart:
713         sfc_rx_qflush(sa, sw_index);
714
715 fail_rx_qcreate:
716         sfc_ev_qstop(evq);
717
718 fail_ev_qstart:
719         return rc;
720 }
721
722 void
723 sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
724 {
725         struct sfc_rxq_info *rxq_info;
726         struct sfc_rxq *rxq;
727
728         sfc_log_init(sa, "sw_index=%u", sw_index);
729
730         SFC_ASSERT(sw_index < sa->rxq_count);
731
732         rxq_info = &sa->rxq_info[sw_index];
733         rxq = rxq_info->rxq;
734
735         if (rxq->state == SFC_RXQ_INITIALIZED)
736                 return;
737         SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
738
739         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
740         sa->eth_dev->data->rx_queue_state[sw_index] =
741                 RTE_ETH_QUEUE_STATE_STOPPED;
742
743         sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
744
745         if (sw_index == 0)
746                 efx_mac_filter_default_rxq_clear(sa->nic);
747
748         sfc_rx_qflush(sa, sw_index);
749
750         rxq->state = SFC_RXQ_INITIALIZED;
751
752         efx_rx_qdestroy(rxq->common);
753
754         sfc_ev_qstop(rxq->evq);
755 }
756
757 uint64_t
758 sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
759 {
760         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
761         uint64_t caps = 0;
762
763         caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
764         caps |= DEV_RX_OFFLOAD_CRC_STRIP;
765         caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
766         caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
767         caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
768
769         if (encp->enc_tunnel_encapsulations_supported &&
770             (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
771                 caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
772
773         return caps;
774 }
775
776 uint64_t
777 sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
778 {
779         uint64_t caps = 0;
780
781         if (sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
782                 caps |= DEV_RX_OFFLOAD_SCATTER;
783
784         return caps;
785 }
786
787 static void
788 sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
789                     const char *verdict, uint64_t offloads)
790 {
791         unsigned long long bit;
792
793         while ((bit = __builtin_ffsll(offloads)) != 0) {
794                 uint64_t flag = (1ULL << --bit);
795
796                 sfc_err(sa, "Rx %s offload %s %s", offload_group,
797                         rte_eth_dev_rx_offload_name(flag), verdict);
798
799                 offloads &= ~flag;
800         }
801 }
802
803 static boolean_t
804 sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
805 {
806         uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
807         uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
808                              sfc_rx_get_queue_offload_caps(sa);
809         uint64_t rejected = requested & ~supported;
810         uint64_t missing = (requested & mandatory) ^ mandatory;
811         boolean_t mismatch = B_FALSE;
812
813         if (rejected) {
814                 sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
815                 mismatch = B_TRUE;
816         }
817
818         if (missing) {
819                 sfc_rx_log_offloads(sa, "queue", "must be set", missing);
820                 mismatch = B_TRUE;
821         }
822
823         return mismatch;
824 }
825
826 static int
827 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
828                    const struct rte_eth_rxconf *rx_conf)
829 {
830         uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
831                                       sfc_rx_get_queue_offload_caps(sa);
832         int rc = 0;
833
834         if (rx_conf->rx_thresh.pthresh != 0 ||
835             rx_conf->rx_thresh.hthresh != 0 ||
836             rx_conf->rx_thresh.wthresh != 0) {
837                 sfc_warn(sa,
838                         "RxQ prefetch/host/writeback thresholds are not supported");
839         }
840
841         if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
842                 sfc_err(sa,
843                         "RxQ free threshold too large: %u vs maximum %u",
844                         rx_conf->rx_free_thresh, rxq_max_fill_level);
845                 rc = EINVAL;
846         }
847
848         if (rx_conf->rx_drop_en == 0) {
849                 sfc_err(sa, "RxQ drop disable is not supported");
850                 rc = EINVAL;
851         }
852
853         if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
854             DEV_RX_OFFLOAD_CHECKSUM)
855                 sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
856
857         if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
858             (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
859                 sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
860
861         if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
862                 rc = EINVAL;
863
864         return rc;
865 }
866
867 static unsigned int
868 sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
869 {
870         uint32_t data_off;
871         uint32_t order;
872
873         /* The mbuf object itself is always cache line aligned */
874         order = rte_bsf32(RTE_CACHE_LINE_SIZE);
875
876         /* Data offset from mbuf object start */
877         data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
878                 RTE_PKTMBUF_HEADROOM;
879
880         order = MIN(order, rte_bsf32(data_off));
881
882         return 1u << order;
883 }
884
885 static uint16_t
886 sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
887 {
888         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
889         const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
890         const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
891         uint16_t buf_size;
892         unsigned int buf_aligned;
893         unsigned int start_alignment;
894         unsigned int end_padding_alignment;
895
896         /* Below it is assumed that both alignments are power of 2 */
897         SFC_ASSERT(rte_is_power_of_2(nic_align_start));
898         SFC_ASSERT(rte_is_power_of_2(nic_align_end));
899
900         /*
901          * mbuf is always cache line aligned, double-check
902          * that it meets rx buffer start alignment requirements.
903          */
904
905         /* Start from mbuf pool data room size */
906         buf_size = rte_pktmbuf_data_room_size(mb_pool);
907
908         /* Remove headroom */
909         if (buf_size <= RTE_PKTMBUF_HEADROOM) {
910                 sfc_err(sa,
911                         "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
912                         mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
913                 return 0;
914         }
915         buf_size -= RTE_PKTMBUF_HEADROOM;
916
917         /* Calculate guaranteed data start alignment */
918         buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
919
920         /* Reserve space for start alignment */
921         if (buf_aligned < nic_align_start) {
922                 start_alignment = nic_align_start - buf_aligned;
923                 if (buf_size <= start_alignment) {
924                         sfc_err(sa,
925                                 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
926                                 mb_pool->name,
927                                 rte_pktmbuf_data_room_size(mb_pool),
928                                 RTE_PKTMBUF_HEADROOM, start_alignment);
929                         return 0;
930                 }
931                 buf_aligned = nic_align_start;
932                 buf_size -= start_alignment;
933         } else {
934                 start_alignment = 0;
935         }
936
937         /* Make sure that end padding does not write beyond the buffer */
938         if (buf_aligned < nic_align_end) {
939                 /*
940                  * Estimate space which can be lost. If guarnteed buffer
941                  * size is odd, lost space is (nic_align_end - 1). More
942                  * accurate formula is below.
943                  */
944                 end_padding_alignment = nic_align_end -
945                         MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
946                 if (buf_size <= end_padding_alignment) {
947                         sfc_err(sa,
948                                 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
949                                 mb_pool->name,
950                                 rte_pktmbuf_data_room_size(mb_pool),
951                                 RTE_PKTMBUF_HEADROOM, start_alignment,
952                                 end_padding_alignment);
953                         return 0;
954                 }
955                 buf_size -= end_padding_alignment;
956         } else {
957                 /*
958                  * Start is aligned the same or better than end,
959                  * just align length.
960                  */
961                 buf_size = P2ALIGN(buf_size, nic_align_end);
962         }
963
964         return buf_size;
965 }
966
967 int
968 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
969              uint16_t nb_rx_desc, unsigned int socket_id,
970              const struct rte_eth_rxconf *rx_conf,
971              struct rte_mempool *mb_pool)
972 {
973         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
974         struct sfc_rss *rss = &sa->rss;
975         int rc;
976         unsigned int rxq_entries;
977         unsigned int evq_entries;
978         unsigned int rxq_max_fill_level;
979         uint16_t buf_size;
980         struct sfc_rxq_info *rxq_info;
981         struct sfc_evq *evq;
982         struct sfc_rxq *rxq;
983         struct sfc_dp_rx_qcreate_info info;
984
985         rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
986                                        &rxq_max_fill_level);
987         if (rc != 0)
988                 goto fail_size_up_rings;
989         SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
990         SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
991         SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
992
993         rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
994         if (rc != 0)
995                 goto fail_bad_conf;
996
997         buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
998         if (buf_size == 0) {
999                 sfc_err(sa, "RxQ %u mbuf pool object size is too small",
1000                         sw_index);
1001                 rc = EINVAL;
1002                 goto fail_bad_conf;
1003         }
1004
1005         if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
1006             (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
1007                 sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
1008                         "object size is too small", sw_index);
1009                 sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
1010                         "PDU size %u plus Rx prefix %u bytes",
1011                         sw_index, buf_size, (unsigned int)sa->port.pdu,
1012                         encp->enc_rx_prefix_size);
1013                 rc = EINVAL;
1014                 goto fail_bad_conf;
1015         }
1016
1017         SFC_ASSERT(sw_index < sa->rxq_count);
1018         rxq_info = &sa->rxq_info[sw_index];
1019
1020         SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
1021         rxq_info->entries = rxq_entries;
1022         rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
1023         rxq_info->type_flags =
1024                 (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
1025                 EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
1026
1027         if ((encp->enc_tunnel_encapsulations_supported != 0) &&
1028             (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
1029                 rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
1030
1031         rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
1032                           evq_entries, socket_id, &evq);
1033         if (rc != 0)
1034                 goto fail_ev_qinit;
1035
1036         rc = ENOMEM;
1037         rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
1038                                  socket_id);
1039         if (rxq == NULL)
1040                 goto fail_rxq_alloc;
1041
1042         rxq_info->rxq = rxq;
1043
1044         rxq->evq = evq;
1045         rxq->hw_index = sw_index;
1046         rxq->refill_threshold =
1047                 RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
1048         rxq->refill_mb_pool = mb_pool;
1049
1050         rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
1051                            socket_id, &rxq->mem);
1052         if (rc != 0)
1053                 goto fail_dma_alloc;
1054
1055         memset(&info, 0, sizeof(info));
1056         info.refill_mb_pool = rxq->refill_mb_pool;
1057         info.max_fill_level = rxq_max_fill_level;
1058         info.refill_threshold = rxq->refill_threshold;
1059         info.buf_size = buf_size;
1060         info.batch_max = encp->enc_rx_batch_max;
1061         info.prefix_size = encp->enc_rx_prefix_size;
1062
1063         if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
1064                 info.flags |= SFC_RXQ_FLAG_RSS_HASH;
1065
1066         info.rxq_entries = rxq_info->entries;
1067         info.rxq_hw_ring = rxq->mem.esm_base;
1068         info.evq_entries = evq_entries;
1069         info.evq_hw_ring = evq->mem.esm_base;
1070         info.hw_index = rxq->hw_index;
1071         info.mem_bar = sa->mem_bar.esb_base;
1072         info.vi_window_shift = encp->enc_vi_window_shift;
1073
1074         rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
1075                                 &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
1076                                 socket_id, &info, &rxq->dp);
1077         if (rc != 0)
1078                 goto fail_dp_rx_qcreate;
1079
1080         evq->dp_rxq = rxq->dp;
1081
1082         rxq->state = SFC_RXQ_INITIALIZED;
1083
1084         rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
1085
1086         return 0;
1087
1088 fail_dp_rx_qcreate:
1089         sfc_dma_free(sa, &rxq->mem);
1090
1091 fail_dma_alloc:
1092         rxq_info->rxq = NULL;
1093         rte_free(rxq);
1094
1095 fail_rxq_alloc:
1096         sfc_ev_qfini(evq);
1097
1098 fail_ev_qinit:
1099         rxq_info->entries = 0;
1100
1101 fail_bad_conf:
1102 fail_size_up_rings:
1103         sfc_log_init(sa, "failed %d", rc);
1104         return rc;
1105 }
1106
1107 void
1108 sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
1109 {
1110         struct sfc_rxq_info *rxq_info;
1111         struct sfc_rxq *rxq;
1112
1113         SFC_ASSERT(sw_index < sa->rxq_count);
1114
1115         rxq_info = &sa->rxq_info[sw_index];
1116
1117         rxq = rxq_info->rxq;
1118         SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
1119
1120         sa->dp_rx->qdestroy(rxq->dp);
1121         rxq->dp = NULL;
1122
1123         rxq_info->rxq = NULL;
1124         rxq_info->entries = 0;
1125
1126         sfc_dma_free(sa, &rxq->mem);
1127
1128         sfc_ev_qfini(rxq->evq);
1129         rxq->evq = NULL;
1130
1131         rte_free(rxq);
1132 }
1133
1134 /*
1135  * Mapping between RTE RSS hash functions and their EFX counterparts.
1136  */
1137 struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
1138         { ETH_RSS_NONFRAG_IPV4_TCP,
1139           EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
1140         { ETH_RSS_NONFRAG_IPV4_UDP,
1141           EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
1142         { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
1143           EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
1144         { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
1145           EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
1146         { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
1147           EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
1148           EFX_RX_HASH(IPV4, 2TUPLE) },
1149         { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
1150           ETH_RSS_IPV6_EX,
1151           EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
1152           EFX_RX_HASH(IPV6, 2TUPLE) }
1153 };
1154
1155 static efx_rx_hash_type_t
1156 sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
1157                             unsigned int *hash_type_flags_supported,
1158                             unsigned int nb_hash_type_flags_supported)
1159 {
1160         efx_rx_hash_type_t hash_type_masked = 0;
1161         unsigned int i, j;
1162
1163         for (i = 0; i < nb_hash_type_flags_supported; ++i) {
1164                 unsigned int class_tuple_lbn[] = {
1165                         EFX_RX_CLASS_IPV4_TCP_LBN,
1166                         EFX_RX_CLASS_IPV4_UDP_LBN,
1167                         EFX_RX_CLASS_IPV4_LBN,
1168                         EFX_RX_CLASS_IPV6_TCP_LBN,
1169                         EFX_RX_CLASS_IPV6_UDP_LBN,
1170                         EFX_RX_CLASS_IPV6_LBN
1171                 };
1172
1173                 for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
1174                         unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
1175                         unsigned int flag;
1176
1177                         tuple_mask <<= class_tuple_lbn[j];
1178                         flag = hash_type & tuple_mask;
1179
1180                         if (flag == hash_type_flags_supported[i])
1181                                 hash_type_masked |= flag;
1182                 }
1183         }
1184
1185         return hash_type_masked;
1186 }
1187
1188 int
1189 sfc_rx_hash_init(struct sfc_adapter *sa)
1190 {
1191         struct sfc_rss *rss = &sa->rss;
1192         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1193         uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
1194         efx_rx_hash_alg_t alg;
1195         unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
1196         unsigned int nb_flags_supp;
1197         struct sfc_rss_hf_rte_to_efx *hf_map;
1198         struct sfc_rss_hf_rte_to_efx *entry;
1199         efx_rx_hash_type_t efx_hash_types;
1200         unsigned int i;
1201         int rc;
1202
1203         if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
1204                 alg = EFX_RX_HASHALG_TOEPLITZ;
1205         else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
1206                 alg = EFX_RX_HASHALG_PACKED_STREAM;
1207         else
1208                 return EINVAL;
1209
1210         rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
1211                                          &nb_flags_supp);
1212         if (rc != 0)
1213                 return rc;
1214
1215         hf_map = rte_calloc_socket("sfc-rss-hf-map",
1216                                    RTE_DIM(sfc_rss_hf_map),
1217                                    sizeof(*hf_map), 0, sa->socket_id);
1218         if (hf_map == NULL)
1219                 return ENOMEM;
1220
1221         entry = hf_map;
1222         efx_hash_types = 0;
1223         for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
1224                 efx_rx_hash_type_t ht;
1225
1226                 ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
1227                                                  flags_supp, nb_flags_supp);
1228                 if (ht != 0) {
1229                         entry->rte = sfc_rss_hf_map[i].rte;
1230                         entry->efx = ht;
1231                         efx_hash_types |= ht;
1232                         ++entry;
1233                 }
1234         }
1235
1236         rss->hash_alg = alg;
1237         rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
1238         rss->hf_map = hf_map;
1239         rss->hash_types = efx_hash_types;
1240
1241         return 0;
1242 }
1243
1244 void
1245 sfc_rx_hash_fini(struct sfc_adapter *sa)
1246 {
1247         struct sfc_rss *rss = &sa->rss;
1248
1249         rte_free(rss->hf_map);
1250 }
1251
1252 int
1253 sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
1254                      efx_rx_hash_type_t *efx)
1255 {
1256         struct sfc_rss *rss = &sa->rss;
1257         efx_rx_hash_type_t hash_types = 0;
1258         unsigned int i;
1259
1260         for (i = 0; i < rss->hf_map_nb_entries; ++i) {
1261                 uint64_t rte_mask = rss->hf_map[i].rte;
1262
1263                 if ((rte & rte_mask) != 0) {
1264                         rte &= ~rte_mask;
1265                         hash_types |= rss->hf_map[i].efx;
1266                 }
1267         }
1268
1269         if (rte != 0) {
1270                 sfc_err(sa, "unsupported hash functions requested");
1271                 return EINVAL;
1272         }
1273
1274         *efx = hash_types;
1275
1276         return 0;
1277 }
1278
1279 uint64_t
1280 sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
1281 {
1282         struct sfc_rss *rss = &sa->rss;
1283         uint64_t rte = 0;
1284         unsigned int i;
1285
1286         for (i = 0; i < rss->hf_map_nb_entries; ++i) {
1287                 efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;
1288
1289                 if ((efx & hash_type) == hash_type)
1290                         rte |= rss->hf_map[i].rte;
1291         }
1292
1293         return rte;
1294 }
1295
1296 static int
1297 sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
1298                             struct rte_eth_rss_conf *conf)
1299 {
1300         struct sfc_rss *rss = &sa->rss;
1301         efx_rx_hash_type_t efx_hash_types = rss->hash_types;
1302         uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);
1303         int rc;
1304
1305         if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1306                 if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
1307                     conf->rss_key != NULL)
1308                         return EINVAL;
1309         }
1310
1311         if (conf->rss_hf != 0) {
1312                 rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
1313                 if (rc != 0)
1314                         return rc;
1315         }
1316
1317         if (conf->rss_key != NULL) {
1318                 if (conf->rss_key_len != sizeof(rss->key)) {
1319                         sfc_err(sa, "RSS key size is wrong (should be %lu)",
1320                                 sizeof(rss->key));
1321                         return EINVAL;
1322                 }
1323                 rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
1324         }
1325
1326         rss->hash_types = efx_hash_types;
1327
1328         return 0;
1329 }
1330
1331 static int
1332 sfc_rx_rss_config(struct sfc_adapter *sa)
1333 {
1334         struct sfc_rss *rss = &sa->rss;
1335         int rc = 0;
1336
1337         if (rss->channels > 0) {
1338                 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1339                                            rss->hash_alg, rss->hash_types,
1340                                            B_TRUE);
1341                 if (rc != 0)
1342                         goto finish;
1343
1344                 rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1345                                           rss->key, sizeof(rss->key));
1346                 if (rc != 0)
1347                         goto finish;
1348
1349                 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1350                                           rss->tbl, RTE_DIM(rss->tbl));
1351         }
1352
1353 finish:
1354         return rc;
1355 }
1356
1357 int
1358 sfc_rx_start(struct sfc_adapter *sa)
1359 {
1360         unsigned int sw_index;
1361         int rc;
1362
1363         sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
1364
1365         rc = efx_rx_init(sa->nic);
1366         if (rc != 0)
1367                 goto fail_rx_init;
1368
1369         rc = sfc_rx_rss_config(sa);
1370         if (rc != 0)
1371                 goto fail_rss_config;
1372
1373         for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
1374                 if ((!sa->rxq_info[sw_index].deferred_start ||
1375                      sa->rxq_info[sw_index].deferred_started)) {
1376                         rc = sfc_rx_qstart(sa, sw_index);
1377                         if (rc != 0)
1378                                 goto fail_rx_qstart;
1379                 }
1380         }
1381
1382         return 0;
1383
1384 fail_rx_qstart:
1385         while (sw_index-- > 0)
1386                 sfc_rx_qstop(sa, sw_index);
1387
1388 fail_rss_config:
1389         efx_rx_fini(sa->nic);
1390
1391 fail_rx_init:
1392         sfc_log_init(sa, "failed %d", rc);
1393         return rc;
1394 }
1395
1396 void
1397 sfc_rx_stop(struct sfc_adapter *sa)
1398 {
1399         unsigned int sw_index;
1400
1401         sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
1402
1403         sw_index = sa->rxq_count;
1404         while (sw_index-- > 0) {
1405                 if (sa->rxq_info[sw_index].rxq != NULL)
1406                         sfc_rx_qstop(sa, sw_index);
1407         }
1408
1409         efx_rx_fini(sa->nic);
1410 }
1411
1412 static int
1413 sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
1414 {
1415         struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
1416         unsigned int max_entries;
1417
1418         max_entries = EFX_RXQ_MAXNDESCS;
1419         SFC_ASSERT(rte_is_power_of_2(max_entries));
1420
1421         rxq_info->max_entries = max_entries;
1422
1423         return 0;
1424 }
1425
1426 static int
1427 sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
1428 {
1429         struct sfc_rss *rss = &sa->rss;
1430         uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
1431                                       sfc_rx_get_queue_offload_caps(sa);
1432         uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
1433         int rc = 0;
1434
1435         switch (rxmode->mq_mode) {
1436         case ETH_MQ_RX_NONE:
1437                 /* No special checks are required */
1438                 break;
1439         case ETH_MQ_RX_RSS:
1440                 if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
1441                         sfc_err(sa, "RSS is not available");
1442                         rc = EINVAL;
1443                 }
1444                 break;
1445         default:
1446                 sfc_err(sa, "Rx multi-queue mode %u not supported",
1447                         rxmode->mq_mode);
1448                 rc = EINVAL;
1449         }
1450
1451         if (offloads_rejected) {
1452                 sfc_rx_log_offloads(sa, "device", "is unsupported",
1453                                     offloads_rejected);
1454                 rc = EINVAL;
1455         }
1456
1457         if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
1458                 sfc_warn(sa, "FCS stripping cannot be disabled - always on");
1459                 rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
1460                 rxmode->hw_strip_crc = 1;
1461         }
1462
1463         return rc;
1464 }
1465
1466 /**
1467  * Destroy excess queues that are no longer needed after reconfiguration
1468  * or complete close.
1469  */
1470 static void
1471 sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
1472 {
1473         int sw_index;
1474
1475         SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
1476
1477         sw_index = sa->rxq_count;
1478         while (--sw_index >= (int)nb_rx_queues) {
1479                 if (sa->rxq_info[sw_index].rxq != NULL)
1480                         sfc_rx_qfini(sa, sw_index);
1481         }
1482
1483         sa->rxq_count = nb_rx_queues;
1484 }
1485
1486 /**
1487  * Initialize Rx subsystem.
1488  *
1489  * Called at device (re)configuration stage when number of receive queues is
1490  * specified together with other device level receive configuration.
1491  *
1492  * It should be used to allocate NUMA-unaware resources.
1493  */
1494 int
1495 sfc_rx_configure(struct sfc_adapter *sa)
1496 {
1497         struct sfc_rss *rss = &sa->rss;
1498         struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
1499         const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
1500         int rc;
1501
1502         sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
1503                      nb_rx_queues, sa->rxq_count);
1504
1505         rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
1506         if (rc != 0)
1507                 goto fail_check_mode;
1508
1509         if (nb_rx_queues == sa->rxq_count)
1510                 goto done;
1511
1512         if (sa->rxq_info == NULL) {
1513                 rc = ENOMEM;
1514                 sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
1515                                                  sizeof(sa->rxq_info[0]), 0,
1516                                                  sa->socket_id);
1517                 if (sa->rxq_info == NULL)
1518                         goto fail_rxqs_alloc;
1519         } else {
1520                 struct sfc_rxq_info *new_rxq_info;
1521
1522                 if (nb_rx_queues < sa->rxq_count)
1523                         sfc_rx_fini_queues(sa, nb_rx_queues);
1524
1525                 rc = ENOMEM;
1526                 new_rxq_info =
1527                         rte_realloc(sa->rxq_info,
1528                                     nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
1529                 if (new_rxq_info == NULL && nb_rx_queues > 0)
1530                         goto fail_rxqs_realloc;
1531
1532                 sa->rxq_info = new_rxq_info;
1533                 if (nb_rx_queues > sa->rxq_count)
1534                         memset(&sa->rxq_info[sa->rxq_count], 0,
1535                                (nb_rx_queues - sa->rxq_count) *
1536                                sizeof(sa->rxq_info[0]));
1537         }
1538
1539         while (sa->rxq_count < nb_rx_queues) {
1540                 rc = sfc_rx_qinit_info(sa, sa->rxq_count);
1541                 if (rc != 0)
1542                         goto fail_rx_qinit_info;
1543
1544                 sa->rxq_count++;
1545         }
1546
1547         rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
1548                          MIN(sa->rxq_count, EFX_MAXRSS) : 0;
1549
1550         if (rss->channels > 0) {
1551                 struct rte_eth_rss_conf *adv_conf_rss;
1552                 unsigned int sw_index;
1553
1554                 for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
1555                         rss->tbl[sw_index] = sw_index % rss->channels;
1556
1557                 adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
1558                 rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
1559                 if (rc != 0)
1560                         goto fail_rx_process_adv_conf_rss;
1561         }
1562
1563 done:
1564         return 0;
1565
1566 fail_rx_process_adv_conf_rss:
1567 fail_rx_qinit_info:
1568 fail_rxqs_realloc:
1569 fail_rxqs_alloc:
1570         sfc_rx_close(sa);
1571
1572 fail_check_mode:
1573         sfc_log_init(sa, "failed %d", rc);
1574         return rc;
1575 }
1576
1577 /**
1578  * Shutdown Rx subsystem.
1579  *
1580  * Called at device close stage, for example, before device shutdown.
1581  */
1582 void
1583 sfc_rx_close(struct sfc_adapter *sa)
1584 {
1585         struct sfc_rss *rss = &sa->rss;
1586
1587         sfc_rx_fini_queues(sa, 0);
1588
1589         rss->channels = 0;
1590
1591         rte_free(sa->rxq_info);
1592         sa->rxq_info = NULL;
1593 }