/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"

/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
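/*
 * Note: the management event queue is polled from an rte_eal_alarm
 * callback at this period (see sfc_ev_mgmt_periodic_qpoll() below), so
 * device-level events are still processed when interrupts are not used.
 */
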
static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
	switch (type) {
	case SFC_EVQ_TYPE_MGMT:
		return "mgmt-evq";
	case SFC_EVQ_TYPE_RX:
		return "rx-evq";
	case SFC_EVQ_TYPE_TX:
		return "tx-evq";
	default:
		SFC_ASSERT(B_FALSE);
		return NULL;
	}
}

static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}

static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
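/*
 * The delta arithmetic above relies on the descriptor ring size being a
 * power of two (asserted in sfc_ev_qinit()). For example, with
 * ptr_mask = 0x3ff (1024 entries), pending_id = 0x3fe and stop = 0x002
 * wrap around the ring end: delta = 0x400 - 0x3fe + 0x002 = 4, i.e.
 * descriptors 0x3fe, 0x3ff, 0x000 and 0x001 are completed.
 */
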
static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
	return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id);
}

static boolean_t
sfc_ev_nop_rx_packets(void *arg, uint32_t label, unsigned int num_packets,
		      uint32_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx packets event label=%u num=%u flags=%#x",
		evq->evq_index, label, num_packets, flags);
	return B_TRUE;
}

static boolean_t
sfc_ev_dp_rx_packets(void *arg, __rte_unused uint32_t label,
		     unsigned int num_packets, __rte_unused uint32_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
	return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, num_packets);
}

static boolean_t
sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
		 uint32_t pkt_count, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
		evq->evq_index, label, id, pkt_count, flags);
	return B_TRUE;
}

/* It is not actually used on datapath, but required on RxQ flush */
static boolean_t
sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
		__rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL)
		return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id);
	else
		return B_FALSE;
}

static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}
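/*
 * Note that sfc_ev_tx() reuses the id parameter as the current pending
 * ring index once the original value is folded into stop; the wrap-around
 * handling is the same modular arithmetic as on the Rx side.
 */
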
static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
	return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id);
}

static boolean_t
sfc_ev_nop_tx_ndescs(void *arg, uint32_t label, unsigned int ndescs)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u ndescs=%#x",
		evq->evq_index, label, ndescs);
	return B_TRUE;
}

static boolean_t
sfc_ev_dp_tx_ndescs(void *arg, __rte_unused uint32_t label,
		    unsigned int ndescs)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
	return evq->sa->priv.dp_tx->qtx_ev(dp_txq, ndescs);
}

static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}
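/*
 * Setting evq->exception here only marks the queue; the actual recovery
 * (restarting the affected Rx/Tx queue, or panicking if nothing can be
 * restarted) is deferred to sfc_ev_qpoll(), which runs with the adapter
 * lock taken.
 */
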
static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	RTE_SET_USED(rxq);

	sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq));

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	RTE_SET_USED(rxq);

	sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq));

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	RTE_SET_USED(txq);

	sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq));

	return B_FALSE;
}

static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link new_link;

	sfc_port_link_mode_to_info(link_mode, &new_link);
	if (rte_eth_linkstatus_set(sa->eth_dev, &new_link) == 0)
		evq->sa->port.lsc_seq++;

	return B_FALSE;
}
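/*
 * rte_eth_linkstatus_set() returns 0 only when the stored link status
 * actually changed, so lsc_seq counts real link transitions rather than
 * every link change event delivered by the hardware.
 */
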
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_efx_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_dp_rx,
	.eec_rx_packets		= sfc_ev_dp_rx_packets,
	.eec_rx_ps		= sfc_ev_dp_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_dp_tx,
	.eec_tx_ndescs		= sfc_ev_dp_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
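/*
 * One of the five callback tables above is selected per event queue in
 * sfc_ev_qstart(): the table depends on whether the EvQ serves an Rx
 * queue, a Tx queue or management events, and on whether the libefx
 * ("efx") or a native datapath implementation is in use.
 */
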
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			sfc_sw_index_t rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			sfc_sw_index_t txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		if (sa->mgmt_evq_running)
			sfc_ev_qpoll(sa->mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}
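/*
 * A trylock is used deliberately: if another context is already polling
 * the management EvQ, this attempt is simply skipped instead of blocking,
 * which keeps the alarm callback and control-path callers from contending.
 */
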
int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}
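/*
 * Priming re-arms the event queue to raise an interrupt when the next
 * event arrives; it is only needed for EvQs created with
 * EFX_EVQ_FLAGS_NOTIFY_INTERRUPT (the management EvQ for LSC and Rx EvQs
 * with interrupts enabled).
 */
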
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	uint32_t irq = 0;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff,
		     efx_evq_size(sa->nic, evq->entries, evq_flags));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) {
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else if (sa->intr.rxq_intr && evq->dp_rxq != NULL) {
		sfc_ethdev_qid_t ethdev_qid;

		ethdev_qid =
			sfc_ethdev_rx_qid_by_rxq_sw_index(sfc_sa2shared(sa),
				evq->dp_rxq->dpq.queue_id);
		if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
			evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
			/*
			 * The first interrupt is used for management EvQ
			 * (LSC etc). RxQ interrupts follow it.
			 */
			irq = 1 + ethdev_qid;
		} else {
			evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
		}
	} else {
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Create the common code event queue */
	rc = efx_ev_qcreate_irq(sa->nic, hw_index, esmp, evq->entries,
				0 /* unused on EF10 */, 0, evq_flags,
				irq, &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->priv.dp_rx->dp.name,
			   SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->priv.dp_tx->dp.name,
			   SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	/*
	 * Poll once to ensure that eec_initialized callback is invoked in
	 * case the hardware does not support INIT_DONE events. If the
	 * hardware supports INIT_DONE events, this will do nothing, and the
	 * corresponding event will be processed by sfc_ev_qpoll() below.
	 */
	efx_ev_qcreate_check_init_done(evq->common, evq->callbacks, evq);

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	evq->init_state = SFC_EVQ_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->common = NULL;
}

static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */

	/*
	 * Management event queue start polls the queue, but it cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is still false.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status change and
	 * other device-level events to avoid an unrecoverable error due
	 * to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
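/*
 * Note the start ordering above: mgmt_evq_running is set under the
 * spinlock only after the queue has been started, so a concurrent
 * sfc_ev_mgmt_qpoll() never touches a half-initialized queue; the
 * failure paths unwind in the reverse order of the setup steps.
 */
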
void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = false;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	sfc_ev_qstop(sa->mgmt_evq);

	efx_ev_fini(sa->nic);
}

int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   efx_evq_size(sa->nic, evq->entries, sa->evq_flags),
			   socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qfini(struct sfc_evq *evq)
{
	struct sfc_adapter *sa = evq->sa;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);

	SFC_ASSERT(sa->evq_count > 0);
	sa->evq_count--;
}

static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}
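/*
 * The accepted perf_profile devarg values are thus the throughput
 * (default, set in sfc_ev_attach()), low-latency and auto profiles; any
 * other string makes the handler fail, which sfc_ev_attach() reports as
 * an invalid parameter value.
 */
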
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = sfc_mgmt_evq_sw_index(sfc_sa2shared(sa));
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}