drivers/net/sfc/sfc_rx.c
/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of a failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS          (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)

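/* Record successful completion of the Rx queue flush */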
void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

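/* Record failure of the Rx queue flush */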
void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

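/*
 * Refill the Rx ring with mbufs taken from the refill mempool in bulks of
 * SFC_RX_REFILL_BULK and push the doorbell if any descriptors were posted.
 */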
static void
sfc_rx_qrefill(struct sfc_rxq *rxq)
{
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        unsigned int id;
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        uint8_t port_id = rxq->port_id;

        free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
                (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(objs);

        id = added & rxq->ptr_mask;
        while (bulks-- > 0) {
                if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                         RTE_DIM(objs)) < 0) {
                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                                RTE_DIM(objs);
                        break;
                }

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        m = objs[i];

                        rxd = &rxq->sw_desc[id];
                        rxd->mbuf = m;

                        rte_mbuf_refcnt_set(m, 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        m->next = NULL;
                        m->nb_segs = 1;
                        m->port = port_id;

                        addr[i] = rte_pktmbuf_mtophys(m);
                }

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        }

        /* Push doorbell if something is posted */
        if (rxq->added != added) {
                rxq->added = added;
                efx_rx_qpush(rxq->common, added, &rxq->pushed);
        }
}

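/* Convert libefx Rx descriptor flags to mbuf offload flags (checksum status) */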
static uint64_t
sfc_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
        uint64_t mbuf_flags = 0;

        switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
        case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
                mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
                break;
        case EFX_PKT_IPV4:
                mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
                           PKT_RX_IP_CKSUM_UNKNOWN);
                break;
        }

        switch ((desc_flags &
                 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
        case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
        case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
                mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
                break;
        case EFX_PKT_TCP:
        case EFX_PKT_UDP:
                mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
                           PKT_RX_L4_CKSUM_UNKNOWN);
                break;
        }

        return mbuf_flags;
}

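/* Convert libefx Rx descriptor flags to mbuf packet type */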
static uint32_t
sfc_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
        return RTE_PTYPE_L2_ETHER |
                ((desc_flags & EFX_PKT_IPV4) ?
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_IPV6) ?
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
                ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

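/*
 * Extract the RSS hash from the Rx prefix (pseudo-header) and set the
 * corresponding mbuf field if RSS hash delivery is enabled on the queue.
 */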
static void
sfc_rx_set_rss_hash(struct sfc_rxq *rxq, unsigned int flags, struct rte_mbuf *m)
{
#if EFSYS_OPT_RX_SCALE
        uint8_t *mbuf_data;

        if ((rxq->flags & SFC_RXQ_RSS_HASH) == 0)
                return;

        mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

        if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
                m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
                                                      EFX_RX_HASHALG_TOEPLITZ,
                                                      mbuf_data);

                m->ol_flags |= PKT_RX_RSS_HASH;
        }
#endif
}

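/*
 * Receive burst callback: poll the event queue to complete received
 * descriptors, convert them to mbufs (chaining scattered fragments) and
 * refill the Rx ring.
 */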
uint16_t
sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_rxq *rxq = rx_queue;
        unsigned int completed;
        unsigned int prefix_size = rxq->prefix_size;
        unsigned int done_pkts = 0;
        boolean_t discard_next = B_FALSE;
        struct rte_mbuf *scatter_pkt = NULL;

        if (unlikely((rxq->state & SFC_RXQ_RUNNING) == 0))
                return 0;

        sfc_ev_qpoll(rxq->evq);

        completed = rxq->completed;
        while (completed != rxq->pending && done_pkts < nb_pkts) {
                unsigned int id;
                struct sfc_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int seg_len;
                unsigned int desc_flags;

                id = completed++ & rxq->ptr_mask;
                rxd = &rxq->sw_desc[id];
                m = rxd->mbuf;
                desc_flags = rxd->flags;

                if (discard_next)
                        goto discard;

                if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
                        goto discard;

                if (desc_flags & EFX_PKT_PREFIX_LEN) {
                        uint16_t tmp_size;
                        int rc __rte_unused;

                        rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
                                rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
                        SFC_ASSERT(rc == 0);
                        seg_len = tmp_size;
                } else {
                        seg_len = rxd->size - prefix_size;
                }

                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                if (scatter_pkt != NULL) {
                        if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
                                rte_mempool_put(rxq->refill_mb_pool,
                                                scatter_pkt);
                                goto discard;
                        }
                        /* The packet to deliver */
                        m = scatter_pkt;
                }

                if (desc_flags & EFX_PKT_CONT) {
                        /* The packet is scattered, more fragments to come */
                        scatter_pkt = m;
                        /* Further fragments have no prefix */
                        prefix_size = 0;
                        continue;
                }

                /* Scattered packet is done */
                scatter_pkt = NULL;
                /* The first fragment of the packet has the prefix */
                prefix_size = rxq->prefix_size;

                m->ol_flags = sfc_rx_desc_flags_to_offload_flags(desc_flags);
                m->packet_type = sfc_rx_desc_flags_to_packet_type(desc_flags);

                /*
                 * Extract RSS hash from the packet prefix and
                 * set the corresponding field (if needed and possible)
                 */
                sfc_rx_set_rss_hash(rxq, desc_flags, m);

                m->data_off += prefix_size;

                *rx_pkts++ = m;
                done_pkts++;
                continue;

discard:
                discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
                rte_mempool_put(rxq->refill_mb_pool, m);
                rxd->mbuf = NULL;
        }

        /* pending is only moved when the entire packet is received */
        SFC_ASSERT(scatter_pkt == NULL);

        rxq->completed = completed;

        sfc_rx_qrefill(rxq);

        return done_pkts;
}

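/* Get the number of received but not yet completed descriptors on the Rx queue */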
unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq = sa->rxq_info[sw_index].rxq;

        if (rxq == NULL || (rxq->state & SFC_RXQ_RUNNING) == 0)
                return 0;

        sfc_ev_qpoll(rxq->evq);

        return rxq->pending - rxq->completed;
}

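/* Check whether the descriptor at the given offset ahead of completed has been received */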
int
sfc_rx_qdesc_done(struct sfc_rxq *rxq, unsigned int offset)
{
        if ((rxq->state & SFC_RXQ_RUNNING) == 0)
                return 0;

        sfc_ev_qpoll(rxq->evq);

        return offset < (rxq->pending - rxq->completed);
}

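/* Return mbufs of all posted but not yet completed descriptors to the mempool */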
static void
sfc_rx_qpurge(struct sfc_rxq *rxq)
{
        unsigned int i;
        struct sfc_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
                rxd->mbuf = NULL;
        }
}

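/* Flush the Rx queue in hardware (with retries) and purge software descriptors */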
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;
        unsigned int retry_count;
        unsigned int wait_count;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /*
         * Retry Rx queue flushing in the case of flush failure or
         * timeout. In the worst case it can delay for 6 seconds.
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_rx_qflush(rxq->common) != 0) {
                        rxq->state |= SFC_RXQ_FLUSH_FAILED;
                        break;
                }
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for Rx queue flush done or failed event at least
                 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_info(sa, "RxQ %u flushed", sw_index);
        }

        sfc_rx_qpurge(rxq);
}

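/*
 * Start the Rx queue: start its event queue, create and enable the hardware
 * Rx queue, fill it with mbufs and, for queue 0, set the default MAC filter
 * Rx queue.
 */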
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        evq = rxq->evq;

        rc = sfc_ev_qstart(sa, evq->evq_index);
        if (rc != 0)
                goto fail_ev_qstart;

        rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                            &rxq->mem, rxq_info->entries,
                            0 /* not used on EF10 */, evq->common,
                            &rxq->common);
        if (rc != 0)
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        rxq->state |= (SFC_RXQ_STARTED | SFC_RXQ_RUNNING);

        sfc_rx_qrefill(rxq);

        if (sw_index == 0) {
                rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common,
                                                    (sa->rss_channels > 1) ?
                                                    B_TRUE : B_FALSE);
                if (rc != 0)
                        goto fail_mac_filter_default_rxq_set;
        }

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_mac_filter_default_rxq_set:
        sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
        sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
        return rc;
}

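/*
 * Stop the Rx queue: stop the datapath, clear the default MAC filter Rx queue
 * (queue 0), flush and destroy the hardware queue and stop its event queue.
 */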
void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;

        if (rxq->state == SFC_RXQ_INITIALIZED)
                return;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        rxq->state &= ~SFC_RXQ_RUNNING;

        if (sw_index == 0)
                efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(sa, rxq->evq->evq_index);
}

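/* Validate Rx queue configuration against what the PMD supports */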
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
                   const struct rte_eth_rxconf *rx_conf)
{
        const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
                sfc_err(sa,
                        "RxQ free threshold too large: %u vs maximum %u",
                        rx_conf->rx_free_thresh, rx_free_thresh_max);
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        return rc;
}

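/* Estimate the guaranteed alignment of the mbuf data start for the given mempool */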
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << (order - 1);
}

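/*
 * Calculate the usable Rx buffer size for the given mempool taking into
 * account the headroom and the buffer start/end alignment required by the NIC.
 */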
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are powers of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * mbuf is always cache line aligned; double-check
         * that it meets Rx buffer start alignment requirements.
         */

        /* Start from mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate space which can be lost. If the guaranteed buffer
                 * size is odd, lost space is (nic_align_end - 1). The more
                 * accurate formula is below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * Start is aligned the same or better than end,
                 * just align length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}

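/* Set up the Rx queue: allocate and initialize all per-queue software resources */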
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        unsigned int evq_index;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;

        rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
        rxq_info->type =
                sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
                EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;

        evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

        rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
        if (rc != 0)
                goto fail_ev_qinit;

        evq = sa->evq_info[evq_index].evq;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        evq->rxq = rxq;
        rxq->evq = evq;
        rxq->ptr_mask = rxq_info->entries - 1;
        rxq->refill_threshold = rx_conf->rx_free_thresh;
        rxq->refill_mb_pool = mb_pool;
        rxq->buf_size = buf_size;
        rxq->hw_index = sw_index;
        rxq->port_id = sa->eth_dev->data->port_id;

        /* Cache limits required on datapath in RxQ structure */
        rxq->batch_max = encp->enc_rx_batch_max;
        rxq->prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
        if (sa->hash_support == EFX_RX_HASH_AVAILABLE)
                rxq->flags |= SFC_RXQ_RSS_HASH;
#endif

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->rxq = rxq;
        rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

        return 0;

fail_desc_alloc:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

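/* Release all per-queue software resources allocated by sfc_rx_qinit() */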
void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        rte_free(rxq->sw_desc);
        sfc_dma_free(sa, &rxq->mem);
        rte_free(rxq);
}

#if EFSYS_OPT_RX_SCALE
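/* Map DPDK RSS hash offload flags to libefx RSS hash types */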
efx_rx_hash_type_t
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
{
        efx_rx_hash_type_t efx_hash_types = 0;

        if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV4;

        if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV4;

        if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV6;

        if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV6;

        return efx_hash_types;
}

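/* Map libefx RSS hash types back to DPDK RSS hash offload flags */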
uint64_t
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
{
        uint64_t rss_hf = 0;

        if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
                rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                           ETH_RSS_NONFRAG_IPV4_OTHER);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

        if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
                rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                           ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
                rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

        return rss_hf;
}
#endif

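/* Program the RSS hash types, key and indirection table if more than one RSS channel is used */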
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
        int rc = 0;

#if EFSYS_OPT_RX_SCALE
        if (sa->rss_channels > 1) {
                rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
                                           sa->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
                                          sizeof(sa->rss_key));
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
                                          sizeof(sa->rss_tbl));
        }

finish:
#endif
        return rc;
}

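/* Start the Rx subsystem: initialize hardware, configure RSS and start all non-deferred Rx queues */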
int
sfc_rx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);
        if (rc != 0)
                goto fail_rx_init;

        rc = sfc_rx_rss_config(sa);
        if (rc != 0)
                goto fail_rss_config;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                if ((!sa->rxq_info[sw_index].deferred_start ||
                     sa->rxq_info[sw_index].deferred_started)) {
                        rc = sfc_rx_qstart(sa, sw_index);
                        if (rc != 0)
                                goto fail_rx_qstart;
                }
        }

        return 0;

fail_rx_qstart:
        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

fail_rss_config:
        efx_rx_fini(sa->nic);

fail_rx_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

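/* Stop all started Rx queues and finalize the Rx subsystem in hardware */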
void
sfc_rx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);
        }

        efx_rx_fini(sa->nic);
}

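/* Initialize per-queue info with driver/hardware limits before queue setup */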
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}

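/* Validate the device-level Rx mode requested by the application against PMD capabilities */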
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
#if EFSYS_OPT_RX_SCALE
        case ETH_MQ_RX_RSS:
                if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
                        sfc_err(sa, "RSS is not available");
                        rc = EINVAL;
                }
                break;
#endif
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_extend) {
                sfc_err(sa,
                        "Q-in-Q HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (!rxmode->hw_strip_crc) {
                sfc_warn(sa,
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;
        }

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");
                rc = EINVAL;
        }

        return rc;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device configuration stage when the number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;
        int rc;

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

        rc = ENOMEM;
        sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
                                         sizeof(struct sfc_rxq_info), 0,
                                         sa->socket_id);
        if (sa->rxq_info == NULL)
                goto fail_rxqs_alloc;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                rc = sfc_rx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_rx_qinit_info;
        }

#if EFSYS_OPT_RX_SCALE
        sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
                           MIN(sa->rxq_count, EFX_MAXRSS) : 1;

        if (sa->rss_channels > 1) {
                for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
                        sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
        }
#endif

        return 0;

fail_rx_qinit_info:
        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;

fail_rxqs_alloc:
        sa->rxq_count = 0;
fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
        sa->rxq_count = 0;
}