/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
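/*
 * The management EVQ carries link status change and other global
 * events; when interrupts are not used it must be polled from the
 * alarm callback (see sfc_ev_mgmt_periodic_qpoll()) at this period.
 */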
static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}
static boolean_t
sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	  uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = evq->rxq;

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);
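	/*
	 * Ring indices are kept modulo the ring size via ptr_mask.
	 * Worked example: with 1024 entries (ptr_mask = 0x3ff),
	 * pending_id = 1020 and id = 3 give stop = 4 and
	 * delta = 1024 - 1020 + 4 = 8, i.e. eight descriptors
	 * completed across the wrap of the ring.
	 */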
	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart\n",
				evq->evq_index, sfc_rxq_sw_index(rxq),
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart\n",
			evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
			flags);

		goto done;
	}
	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_txq *txq;
	unsigned int stop;
	unsigned int delta;

	txq = evq->txq;

	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
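	/*
	 * Same modulo-ring arithmetic as in sfc_ev_rx(): the subtraction
	 * wraps through ptr_mask + 1 when the completed index has passed
	 * the end of the Tx ring.
	 */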
	txq->pending += delta;

done:
	return B_FALSE;
}
static boolean_t
sfc_ev_exception(void *arg, uint32_t code, uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}
static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}
static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}
static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_txq *txq;

	txq = evq->txq;
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}
static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);

	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);

	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);

	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);

	return B_TRUE;
}
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;
	uint64_t new_link_u64;
	uint64_t old_link_u64;

	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);

	new_link_u64 = *(uint64_t *)&new_link;
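	/*
	 * The 8-byte link status is published with a compare-and-set
	 * loop so concurrent readers never observe a torn value; the
	 * LSC sequence number is bumped only by the writer that wins.
	 */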
	do {
		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
		if (old_link_u64 == new_link_u64)
			break;

		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					old_link_u64, new_link_u64)) {
			evq->sa->port.lsc_seq++;
			break;
		}
	} while (B_TRUE);

	return B_FALSE;
}
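/*
 * Table of handlers dispatched by the common code efx_ev_qpoll();
 * a handler returns B_TRUE to abort further event processing for the
 * current poll, B_FALSE to continue it.
 */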
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, &sfc_ev_callbacks, evq);
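	/*
	 * Event handlers do not try to recover from errors in event
	 * context; they only latch evq->exception, which is handled
	 * below under the adapter lock.
	 */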
	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if ((evq->rxq != NULL) && (evq->rxq->state & SFC_RXQ_RUNNING)) {
			unsigned int rxq_sw_index = sfc_rxq_sw_index(evq->rxq);

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}
		if (evq->txq != NULL) {
			unsigned int txq_sw_index = sfc_txq_sw_index(evq->txq);

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}
	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
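	/*
	 * A failed trylock is not an error: the management EVQ is
	 * polled again by the periodic alarm, so contention simply
	 * defers the poll.
	 */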
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;

		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
			sfc_ev_qpoll(mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}
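/*
 * Re-arm the EVQ to raise an interrupt for events past the current
 * read pointer; used only for the management EVQ when the link
 * status change interrupt is enabled, since datapath queues are polled.
 */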
int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}
int
sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	efsys_mem_t *esmp;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;
	esmp = &evq->mem;
	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff,
		     EFX_EVQ_SIZE(evq_info->entries));
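	/*
	 * An event descriptor of all-ones is treated as "not present"
	 * by the common code, so filling the ring with 0xff marks
	 * every entry as empty before the queue is created.
	 */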
	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
			    0 /* unused on EF10 */, 0, evq_info->flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;
	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
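	/*
	 * Exponential backoff: start polling at 1 us and double the
	 * delay up to 10 ms per attempt, giving up after roughly 2 s
	 * (SFC_EVQ_INIT_TIMEOUT_US) in total.
	 */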
	do {
		sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * was posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->evq_count);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;

	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);
}
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc != 0)
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
}
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}
static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
		if (rc != 0)
			goto fail_evq0_prime;
	}

	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status change
	 * and other device-level events to avoid an unrecoverable
	 * error due to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when the corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;
fail_evq0_prime:
	sfc_ev_qstop(sa, 0);

fail_mgmt_evq_start:
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	/* Make sure that all event queues are stopped */
	sw_index = sa->evq_count;
	while (sw_index-- > 0) {
		if (sw_index == sa->mgmt_evq_index) {
			/* Locks are required for the management EVQ */
			rte_spinlock_lock(&sa->mgmt_evq_lock);
			sfc_ev_qstop(sa, sa->mgmt_evq_index);
			rte_spinlock_unlock(&sa->mgmt_evq_lock);
		} else {
			sfc_ev_qstop(sa, sw_index);
		}
	}

	efx_ev_fini(sa->nic);
}
int
sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     unsigned int entries, int socket_id)
{
	struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];

	SFC_ASSERT(rte_is_power_of_2(entries));
	SFC_ASSERT(entries <= evq_info->max_entries);
	evq_info->entries = entries;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		return ENOMEM;

	evq->sa = sa;
	evq->evq_index = sw_index;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, "evq", sw_index,
			   EFX_EVQ_SIZE(evq_info->entries), socket_id,
			   &evq->mem);
	if (rc != 0) {
		rte_free(evq);
		return rc;
	}

	evq->init_state = SFC_EVQ_INITIALIZED;

	evq_info->evq = evq;

	return 0;
}
void
sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq = sa->evq_info[sw_index].evq;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sa->evq_info[sw_index].evq = NULL;

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);
}
static int
sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
	unsigned int max_entries;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	max_entries = sfc_evq_max_entries(sa, sw_index);
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	evq_info->max_entries = max_entries;
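	/*
	 * Only the management EVQ is allowed to signal via interrupt,
	 * and only when the link status change interrupt is in use;
	 * all datapath EVQs are serviced purely by polling.
	 */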
	evq_info->flags = sa->evq_flags |
		((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
		 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
		 EFX_EVQ_FLAGS_NOTIFY_DISABLED);

	return 0;
}
static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint64_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}
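/*
 * The profile above is chosen with the perf_profile device argument
 * (accepted value strings are defined in sfc_kvargs.h), for example
 * a hypothetical EAL whitelist arg: -w 02:00.0,perf_profile=throughput.
 */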
static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to cleanup */
}
int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}
	sa->evq_count = sfc_ev_qcount(sa);
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;
	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when the corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;
fail_mgmt_evq_init:
fail_ev_qinit_info:
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_fini(struct sfc_adapter *sa)
{
	int sw_index;

	sfc_log_init(sa, "entry");

	/* Cleanup all event queues */
	sw_index = sa->evq_count;
	while (--sw_index >= 0) {
		if (sa->evq_info[sw_index].evq != NULL)
			sfc_ev_qfini(sa, sw_index);
		sfc_ev_qfini_info(sa, sw_index);
	}

	rte_free(sa->evq_info);
	sa->evq_info = NULL;
	sa->evq_count = 0;
}