/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
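
/*
 * Together the three init constants define the EVQ startup wait policy
 * used in sfc_ev_qstart() below: poll the queue, sleep, double the sleep
 * time (capped at SFC_EVQ_INIT_BACKOFF_MAX_US) and give up once the
 * accumulated delay exceeds SFC_EVQ_INIT_TIMEOUT_US.
 */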
static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
	switch (type) {
	case SFC_EVQ_TYPE_MGMT:
		return "mgmt-evq";
	case SFC_EVQ_TYPE_RX:
		return "rx-evq";
	case SFC_EVQ_TYPE_TX:
		return "tx-evq";
	default:
		SFC_ASSERT(B_FALSE);
		return NULL;
	}
}
static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}
static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}
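
/*
 * Rx event handler for the EFX (libefx-based) datapath: each event carries
 * the id of the last completed descriptor, and all software descriptors
 * from the previous pending position up to and including that id are
 * marked done with the reported size and flags.
 */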
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);
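	/*
	 * Worked example with hypothetical values: for ptr_mask == 0x3ff
	 * (a 1024-entry ring), pending_id == 0x3fe and id == 0x001 give
	 * stop == 0x002, so the wrapped delta is
	 * (0x400 - 0x3fe + 0x002) == 4 completed descriptors.
	 */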
	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
}
static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}
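
/*
 * Tx event handler for the EFX datapath: the event reports the id of the
 * last completed descriptor; the number of newly reaped descriptors is
 * derived with the same wrap-around arithmetic as on the Rx side and is
 * added to the pending count.
 */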
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}
static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
	return evq->sa->dp_tx->qtx_ev(dp_txq, id);
}
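
/*
 * Hardware exception handler: unknown sensor events are ignored, while
 * any other exception latches evq->exception so that sfc_ev_qpoll() can
 * attempt a queue restart (or panic) outside of event processing.
 */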
static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}
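
/*
 * RxQ/TxQ flush completion handlers follow. The "nop" variants flag
 * flush events that arrive on an event queue which does not serve a
 * queue of that type; the real handlers forward the outcome to the
 * corresponding Rx/Tx queue state machine.
 */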
static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}
static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}
static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}
static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}
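
/*
 * Link change handler: publishes the new link state to dev_link with a
 * lock-free 64-bit compare-and-set (the static assert in the function
 * body guarantees struct rte_eth_link fits in 64 bits) and bumps
 * port.lsc_seq only when the state actually changes.
 */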
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;
	uint64_t new_link_u64;
	uint64_t old_link_u64;

	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);

	new_link_u64 = *(uint64_t *)&new_link;
	do {
		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
		if (old_link_u64 == new_link_u64)
			break;

		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					old_link_u64, new_link_u64)) {
			evq->sa->port.lsc_seq++;
			break;
		}
	} while (B_TRUE);

	return B_FALSE;
}
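
/*
 * Five callback tables share the common handlers and differ only in the
 * Rx/Tx and flush callbacks; sfc_ev_qstart() selects one per event queue
 * depending on whether the queue serves an Rx or Tx queue and on which
 * datapath implementation is in use.
 */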
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_efx_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_dp_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_dp_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
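
/*
 * Poll an event queue and, if an exception has been latched, try to
 * recover by restarting the served Rx/Tx queue under the adapter lock;
 * if the exception persists after the restart, the adapter panics.
 */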
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
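
/*
 * Best-effort management EVQ poll: the trylock means that a contended
 * lock simply skips this round instead of blocking, so the function is
 * safe to call both from the periodic alarm callback and from no-wait
 * link status updates.
 */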
void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		struct sfc_evq *mgmt_evq = sa->mgmt_evq;

		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
			sfc_ev_qpoll(mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}
int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;
	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}
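
	/*
	 * The wait loop below uses exponential backoff: with the constants
	 * defined at the top of the file the sleep sequence is 1, 2, 4, ...
	 * microseconds, capped at 10 ms per attempt, until roughly 2 s have
	 * elapsed in total.
	 */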
	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_qstop(struct sfc_evq *evq)
{
	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}
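
/*
 * Periodic management EVQ poll: each invocation re-arms a one-shot EAL
 * alarm for itself, so polling repeats every SFC_MGMT_EV_QPOLL_PERIOD_US
 * until the alarm is cancelled in sfc_ev_mgmt_periodic_qpoll_stop().
 */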
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_evq0_prime;
	}

	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status change
	 * and other device-level events to avoid an unrecoverable
	 * error caused by event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_evq0_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sfc_ev_qstop(sa->mgmt_evq);
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	efx_ev_fini(sa->nic);
}
int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_qfini(struct sfc_evq *evq)
{
	struct sfc_adapter *sa = evq->sa;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);

	SFC_ASSERT(sa->evq_count > 0);
	sa->evq_count--;
}
static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint64_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}
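
/*
 * Illustrative devargs usage (hypothetical PCI address; key and value
 * spellings are those defined in sfc_kvargs.h), selecting the profile
 * at application startup:
 *   -w 0000:01:00.0,perf_profile=low-latency
 */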
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}