/*
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "sfc_debug.h"
#include "sfc_kvargs.h"
/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

sfc_ev_initialized(void *arg)
{
        struct sfc_evq *evq = arg;

        /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
        SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
                   evq->init_state == SFC_EVQ_STARTED);

        evq->init_state = SFC_EVQ_STARTED;

sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
              uint32_t size, uint16_t flags)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa,
                "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
                evq->evq_index, label, id, size, flags);

sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
              uint32_t size, uint16_t flags)
{
        struct sfc_evq *evq = arg;
        struct sfc_efx_rxq *rxq;
        unsigned int stop;
        unsigned int pending_id;
        unsigned int delta;
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;

        if (unlikely(evq->exception))
                goto done;

        rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

        SFC_ASSERT(rxq != NULL);
        SFC_ASSERT(rxq->evq == evq);
        SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);
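
        /*
         * The event carries the ring index of the last completed descriptor.
         * The number of descriptors completed by this event is the ring
         * distance from the oldest pending descriptor to that index, with
         * wrap-around on the power-of-two ring. Illustrative example (the
         * values are assumptions, not taken from the driver): with
         * ptr_mask = 0x1ff (512 entries), pending = 510 and id = 1,
         * stop = 2 and delta = 512 - 510 + 2 = 4, i.e. descriptors
         * 510, 511, 0 and 1 have been completed.
         */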
        stop = (id + 1) & rxq->ptr_mask;
        pending_id = rxq->pending & rxq->ptr_mask;
        delta = (stop >= pending_id) ? (stop - pending_id) :
                (rxq->ptr_mask + 1 - pending_id + stop);

        if (delta == 0) {
                /*
                 * Rx event with no new descriptors done and zero length
                 * is used to abort scattered packet when there is no room
                 * for the tail.
                 */
                if (unlikely(size != 0)) {
                        evq->exception = B_TRUE;
                        sfc_err(evq->sa,
                                "EVQ %u RxQ %u invalid RX abort "
                                "(id=%#x size=%u flags=%#x); needs restart",
                                evq->evq_index, rxq->dp.dpq.queue_id,
                                id, size, flags);
                        goto done;
                }

                /* Add discard flag to the first fragment */
                rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
                /* Remove continue flag from the last fragment */
                rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
        } else if (unlikely(delta > rxq->batch_max)) {
                evq->exception = B_TRUE;
                sfc_err(evq->sa,
                        "EVQ %u RxQ %u completion out of order "
                        "(id=%#x delta=%u flags=%#x); needs restart",
                        evq->evq_index, rxq->dp.dpq.queue_id,
                        id, delta, flags);
                goto done;
        }

        for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
                rxd = &rxq->sw_desc[i];

                rxd->flags = flags;
                SFC_ASSERT(size < (1 << 16));
                rxd->size = (uint16_t)size;
        }

        rxq->pending += delta;
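
        /*
         * Only the book-keeping is updated here; the descriptors between
         * the old and the new rxq->pending are turned into mbufs later by
         * the EFX Rx datapath burst routine (an assumption based on how
         * rxq->pending is consumed elsewhere in the driver).
         */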

sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
                evq->evq_index, label, id);

sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
        struct sfc_evq *evq = arg;
        struct sfc_txq *txq = evq->txq;
        unsigned int stop;
        unsigned int delta;

        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->evq == evq);

        if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
                goto done;

        stop = (id + 1) & txq->ptr_mask;
        id = txq->pending & txq->ptr_mask;
        delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
        txq->pending += delta;
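
        /*
         * As on the Rx side, only txq->pending is advanced here; the
         * completed descriptors and their mbufs are reaped later from the
         * transmit datapath (an assumption mirroring the Rx handling above).
         */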

sfc_ev_exception(void *arg, uint32_t code, uint32_t data)
{
        struct sfc_evq *evq = arg;
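
        /*
         * Unknown sensor events are benign and are not treated as an
         * exception; anything else marks the EVQ as excepted so that
         * sfc_ev_qpoll() can attempt recovery.
         */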
        if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
                return B_FALSE;

        evq->exception = B_TRUE;
        sfc_warn(evq->sa,
                 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
                 " needs recovery",
                 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
                 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
                 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
                 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
                 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
                 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
                 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
                 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
                 "UNKNOWN",
                 code, data, evq->evq_index);

sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
                evq->evq_index, rxq_hw_index);

sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
        struct sfc_evq *evq = arg;
        struct sfc_dp_rxq *dp_rxq;
        struct sfc_rxq *rxq;

        dp_rxq = evq->dp_rxq;
        SFC_ASSERT(dp_rxq != NULL);

        rxq = sfc_rxq_by_dp_rxq(dp_rxq);
        SFC_ASSERT(rxq != NULL);
        SFC_ASSERT(rxq->hw_index == rxq_hw_index);
        SFC_ASSERT(rxq->evq == evq);
        sfc_rx_qflush_done(rxq);

sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
                evq->evq_index, rxq_hw_index);

sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
        struct sfc_evq *evq = arg;
        struct sfc_dp_rxq *dp_rxq;
        struct sfc_rxq *rxq;

        dp_rxq = evq->dp_rxq;
        SFC_ASSERT(dp_rxq != NULL);

        rxq = sfc_rxq_by_dp_rxq(dp_rxq);
        SFC_ASSERT(rxq != NULL);
        SFC_ASSERT(rxq->hw_index == rxq_hw_index);
        SFC_ASSERT(rxq->evq == evq);
        sfc_rx_qflush_failed(rxq);

sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
                evq->evq_index, txq_hw_index);

sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
        struct sfc_evq *evq = arg;
        struct sfc_txq *txq;

        txq = evq->txq;
        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->hw_index == txq_hw_index);
        SFC_ASSERT(txq->evq == evq);
        sfc_tx_qflush_done(txq);

sfc_ev_software(void *arg, uint16_t magic)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
                evq->evq_index, magic);

sfc_ev_sram(void *arg, uint32_t code)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
                evq->evq_index, code);

sfc_ev_wake_up(void *arg, uint32_t index)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
                evq->evq_index, index);

sfc_ev_timer(void *arg, uint32_t index)
{
        struct sfc_evq *evq = arg;

        sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
                evq->evq_index, index);

sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
        struct sfc_evq *evq = arg;
        sfc_err(evq->sa, "EVQ %u unexpected link change event",
                evq->evq_index);

sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
        struct sfc_evq *evq = arg;
        struct sfc_adapter *sa = evq->sa;
        struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
        struct rte_eth_link new_link;
        uint64_t new_link_u64;
        uint64_t old_link_u64;

        EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
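
        /*
         * The link status is kept as a single 64-bit word and is updated
         * lock-free: the current value is read atomically, compared with
         * the new one and, only if it differs, replaced using a 64-bit
         * compare-and-set, so readers never observe a torn value and
         * lsc_seq is bumped only when the stored link actually changed.
         */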
        sfc_port_link_mode_to_info(link_mode, &new_link);

        new_link_u64 = *(uint64_t *)&new_link;
        old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
        if (old_link_u64 == new_link_u64)
                return B_FALSE;

        if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
                                old_link_u64, new_link_u64)) {
                evq->sa->port.lsc_seq++;
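
/*
 * Callback tables for efx_ev_qpoll(). sfc_ev_qstart() selects one of them
 * per event queue: the default table for the management EVQ, the EFX or
 * native datapath Rx tables for EVQs bound to an Rx queue and the Tx table
 * for EVQs bound to a Tx queue. Events that a given EVQ is not expected to
 * deliver are routed to the nop handlers, which only log an error.
 */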
static const efx_ev_callbacks_t sfc_ev_callbacks = {
        .eec_initialized = sfc_ev_initialized,
        .eec_rx = sfc_ev_nop_rx,
        .eec_tx = sfc_ev_nop_tx,
        .eec_exception = sfc_ev_exception,
        .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
        .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
        .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
        .eec_software = sfc_ev_software,
        .eec_sram = sfc_ev_sram,
        .eec_wake_up = sfc_ev_wake_up,
        .eec_timer = sfc_ev_timer,
        .eec_link_change = sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
        .eec_initialized = sfc_ev_initialized,
        .eec_rx = sfc_ev_efx_rx,
        .eec_tx = sfc_ev_nop_tx,
        .eec_exception = sfc_ev_exception,
        .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
        .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
        .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
        .eec_software = sfc_ev_software,
        .eec_sram = sfc_ev_sram,
        .eec_wake_up = sfc_ev_wake_up,
        .eec_timer = sfc_ev_timer,
        .eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
        .eec_initialized = sfc_ev_initialized,
        .eec_rx = sfc_ev_nop_rx,
        .eec_tx = sfc_ev_nop_tx,
        .eec_exception = sfc_ev_exception,
        .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
        .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
        .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
        .eec_software = sfc_ev_software,
        .eec_sram = sfc_ev_sram,
        .eec_wake_up = sfc_ev_wake_up,
        .eec_timer = sfc_ev_timer,
        .eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_tx = {
        .eec_initialized = sfc_ev_initialized,
        .eec_rx = sfc_ev_nop_rx,
        .eec_tx = sfc_ev_tx,
        .eec_exception = sfc_ev_exception,
        .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
        .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
        .eec_txq_flush_done = sfc_ev_txq_flush_done,
        .eec_software = sfc_ev_software,
        .eec_sram = sfc_ev_sram,
        .eec_wake_up = sfc_ev_wake_up,
        .eec_timer = sfc_ev_timer,
        .eec_link_change = sfc_ev_nop_link_change,
};

sfc_ev_qpoll(struct sfc_evq *evq)
{
        SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
                   evq->init_state == SFC_EVQ_STARTING);

        /* There is no need to synchronize the DMA memory for reading */
        efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);
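
        /*
         * If one of the callbacks above flagged an exception, try to
         * recover by restarting the affected Rx or Tx queue under the
         * adapter lock; an exception that is not bound to a queue is
         * fatal and ends in sfc_panic() below.
         */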
        if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
                struct sfc_adapter *sa = evq->sa;
                int rc;

                if (evq->dp_rxq != NULL) {
                        unsigned int rxq_sw_index;

                        rxq_sw_index = evq->dp_rxq->dpq.queue_id;
                        sfc_warn(sa,
                                 "restart RxQ %u because of exception on its EvQ %u",
                                 rxq_sw_index, evq->evq_index);

                        sfc_rx_qstop(sa, rxq_sw_index);
                        rc = sfc_rx_qstart(sa, rxq_sw_index);
                        if (rc != 0)
                                sfc_err(sa, "cannot restart RxQ %u",
                                        rxq_sw_index);
                }

                if (evq->txq != NULL) {
                        unsigned int txq_sw_index = sfc_txq_sw_index(evq->txq);

                        sfc_warn(sa,
                                 "restart TxQ %u because of exception on its EvQ %u",
                                 txq_sw_index, evq->evq_index);
                        sfc_tx_qstop(sa, txq_sw_index);
                        rc = sfc_tx_qstart(sa, txq_sw_index);
                        if (rc != 0)
                                sfc_err(sa, "cannot restart TxQ %u",
                                        txq_sw_index);
                }

                if (evq->exception)
                        sfc_panic(sa, "unrecoverable exception on EvQ %u",
                                  evq->evq_index);

                sfc_adapter_unlock(sa);
        }

        /* Poll-mode driver does not re-prime the event queue for interrupts */
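
/*
 * Poll the management event queue if nobody else is holding it. The
 * trylock makes the call safe from both the periodic alarm below and,
 * presumably, other contexts that need to pump global events; when the
 * lock is contended the round is simply skipped and the next poll
 * catches up.
 */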
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
        if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
                struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;

                if (mgmt_evq->init_state == SFC_EVQ_STARTED)
                        sfc_ev_qpoll(mgmt_evq);

                rte_spinlock_unlock(&sa->mgmt_evq_lock);
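
/*
 * Re-prime the event queue so that the NIC raises an interrupt for the
 * next event. This is only used for the management EVQ when link status
 * change interrupts are enabled (see sfc_ev_start()).
 */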
sfc_ev_qprime(struct sfc_evq *evq)
{
        SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
        return efx_ev_qprime(evq->common, evq->read_ptr);

sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        const struct sfc_evq_info *evq_info;
        struct sfc_evq *evq;
        efsys_mem_t *esmp;
        unsigned int total_delay_us;
        unsigned int delay_us;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);
        evq_info = &sa->evq_info[sw_index];
        evq = evq_info->evq;
        esmp = &evq->mem;

        /* Clear all events */
        (void)memset((void *)esmp->esm_base, 0xff,
                     EFX_EVQ_SIZE(evq_info->entries));
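
        /*
         * An event of all-ones is never valid, so filling the ring with
         * 0xff lets the poll routine tell entries written by the NIC from
         * unused ones (an assumption based on common libefx behaviour).
         */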

        /* Create the common code event queue */
        rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
                            0 /* unused on EF10 */, 0, evq_info->flags,
                            &evq->common);
        if (rc != 0)
                goto fail_ev_qcreate;

        SFC_ASSERT(evq->dp_rxq == NULL || evq->txq == NULL);
        if (evq->dp_rxq != NULL) {
                if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
                        evq->callbacks = &sfc_ev_callbacks_efx_rx;
                else
                        evq->callbacks = &sfc_ev_callbacks_dp_rx;
        } else if (evq->txq != NULL) {
                evq->callbacks = &sfc_ev_callbacks_tx;
        } else {
                evq->callbacks = &sfc_ev_callbacks;
        }

        evq->init_state = SFC_EVQ_STARTING;

        /* Wait for the initialization event */
        total_delay_us = 0;
        delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
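
        /*
         * Poll the EVQ until the init done event shows up, sleeping between
         * attempts with an exponential backoff that starts at
         * SFC_EVQ_INIT_BACKOFF_START_US, is capped at
         * SFC_EVQ_INIT_BACKOFF_MAX_US and gives up after roughly
         * SFC_EVQ_INIT_TIMEOUT_US in total.
         */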
        do {
                (void)sfc_ev_qpoll(evq);

                /* Check to see if the initialization complete indication
                 * has been posted by the hardware.
                 */
                if (evq->init_state == SFC_EVQ_STARTED)
                        goto done;

                /* Give event queue some time to init */
                rte_delay_us(delay_us);

                total_delay_us += delay_us;

                /* Exponential backoff */
                delay_us *= 2;
                if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
                        delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

        } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

        evq->init_state = SFC_EVQ_INITIALIZED;
        efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
        sfc_log_init(sa, "failed %d", rc);

sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        const struct sfc_evq_info *evq_info;
        struct sfc_evq *evq;

        sfc_log_init(sa, "sw_index=%u", sw_index);
        SFC_ASSERT(sw_index < sa->evq_count);

        evq_info = &sa->evq_info[sw_index];
        evq = evq_info->evq;

        if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
                return;

        evq->init_state = SFC_EVQ_INITIALIZED;
        evq->callbacks = NULL;
        evq->exception = B_FALSE;

        efx_ev_qdestroy(evq->common);
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
        struct sfc_adapter *sa = arg;
        int rc;

        sfc_ev_mgmt_qpoll(sa);
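
        /*
         * EAL alarms are single-shot, so the callback re-arms itself here to
         * keep the management EVQ polled once per period.
         */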
        rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
                               sfc_ev_mgmt_periodic_qpoll, sa);
        if (rc == -ENOTSUP) {
                sfc_warn(sa, "alarms are not supported");
                sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
        } else if (rc != 0) {
                sfc_err(sa,
                        "cannot rearm management EVQ polling alarm (rc=%d)",
                        rc);
        }

sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
        sfc_ev_mgmt_periodic_qpoll(sa);

sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
        rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);

sfc_ev_start(struct sfc_adapter *sa)
{
        int rc;

        sfc_log_init(sa, "entry");

        rc = efx_ev_init(sa->nic);

        /* Start management EVQ used for global events */
        rte_spinlock_lock(&sa->mgmt_evq_lock);

        rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
        if (rc != 0)
                goto fail_mgmt_evq_start;

        if (sa->intr.lsc_intr) {
                rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
                if (rc != 0)
                        goto fail_evq0_prime;
        }

        rte_spinlock_unlock(&sa->mgmt_evq_lock);

        /*
         * Start management EVQ polling. If interrupts are disabled (not
         * used), it is required in order to process link status change and
         * other device-level events so that an unrecoverable error caused
         * by event queue overflow is avoided.
         */
        sfc_ev_mgmt_periodic_qpoll_start(sa);

        /*
         * Rx/Tx event queues are started/stopped when corresponding
         * Rx/Tx queue is started/stopped.
         */

fail_mgmt_evq_start:
        rte_spinlock_unlock(&sa->mgmt_evq_lock);
        efx_ev_fini(sa->nic);

        sfc_log_init(sa, "failed %d", rc);

sfc_ev_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "entry");

        sfc_ev_mgmt_periodic_qpoll_stop(sa);

        /* Make sure that all event queues are stopped */
        sw_index = sa->evq_count;
        while (sw_index-- > 0) {
                if (sw_index == sa->mgmt_evq_index) {
                        /* Locks are required for the management EVQ */
                        rte_spinlock_lock(&sa->mgmt_evq_lock);
                        sfc_ev_qstop(sa, sa->mgmt_evq_index);
                        rte_spinlock_unlock(&sa->mgmt_evq_lock);
                } else {
                        sfc_ev_qstop(sa, sw_index);
                }
        }

        efx_ev_fini(sa->nic);

sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             unsigned int entries, int socket_id)
{
        struct sfc_evq_info *evq_info;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        evq_info = &sa->evq_info[sw_index];

        SFC_ASSERT(rte_is_power_of_2(entries));
        SFC_ASSERT(entries <= evq_info->max_entries);
        evq_info->entries = entries;

        evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
                                 socket_id);

        evq->evq_index = sw_index;

        /* Allocate DMA space */
        rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
                           socket_id, &evq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        evq->init_state = SFC_EVQ_INITIALIZED;

sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_evq *evq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        evq = sa->evq_info[sw_index].evq;
        SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
        sa->evq_info[sw_index].evq = NULL;

        sfc_dma_free(sa, &evq->mem);

sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
        unsigned int max_entries;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        max_entries = sfc_evq_max_entries(sa, sw_index);
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        evq_info->max_entries = max_entries;
        evq_info->flags = sa->evq_flags |
                ((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
                 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
                 EFX_EVQ_FLAGS_NOTIFY_DISABLED);
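
/*
 * Handler for the SFC_KVARG_PERF_PROFILE device argument: it maps the
 * string value to the corresponding EVQ type flags, which are then applied
 * to every event queue (see the evq_flags handling in sfc_ev_init() and
 * sfc_ev_qinit_info() above).
 */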
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
                               const char *value_str, void *opaque)
{
        uint64_t *value = opaque;

        if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
                *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
        else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
                *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
        else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
                *value = EFX_EVQ_FLAGS_TYPE_AUTO;

sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        sfc_log_init(sa, "sw_index=%u", sw_index);

        /* Nothing to clean up */

sfc_ev_init(struct sfc_adapter *sa)
{
        int rc;
        unsigned int sw_index;

        sfc_log_init(sa, "entry");

        sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
        rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
                                sfc_kvarg_perf_profile_handler,
                                &sa->evq_flags);
        if (rc != 0) {
                sfc_err(sa, "invalid %s parameter value",
                        SFC_KVARG_PERF_PROFILE);
                goto fail_kvarg_perf_profile;
        }

        sa->evq_count = sfc_ev_qcount(sa);
        sa->mgmt_evq_index = 0;
        rte_spinlock_init(&sa->mgmt_evq_lock);

        /* Allocate EVQ info array */
        sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
                                         sizeof(struct sfc_evq_info), 0,
                                         sa->socket_id);
        if (sa->evq_info == NULL)
                goto fail_evqs_alloc;

        for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
                rc = sfc_ev_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_ev_qinit_info;
        }

        rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
                          sa->socket_id);
        if (rc != 0)
                goto fail_mgmt_evq_init;

        /*
         * Rx/Tx event queues are created/destroyed when corresponding
         * Rx/Tx queue is created/destroyed.
         */

        return 0;

fail_mgmt_evq_init:
fail_ev_qinit_info:
        while (sw_index-- > 0)
                sfc_ev_qfini_info(sa, sw_index);

        rte_free(sa->evq_info);

fail_kvarg_perf_profile:
        sfc_log_init(sa, "failed %d", rc);

sfc_ev_fini(struct sfc_adapter *sa)
{
        int sw_index;

        sfc_log_init(sa, "entry");
        /* Cleanup all event queues */
        sw_index = sa->evq_count;
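        /* sw_index must be signed so that the countdown below terminates */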
        while (--sw_index >= 0) {
                if (sa->evq_info[sw_index].evq != NULL)
                        sfc_ev_qfini(sa, sw_index);
                sfc_ev_qfini_info(sa, sw_index);
        }

        rte_free(sa->evq_info);