/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <errno.h>

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"

/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Approximate event queue init timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

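/*
 * Event handlers below are invoked by efx_ev_qpoll() with the EVQ
 * pointer passed as the opaque argument. A handler returns B_FALSE to
 * continue event processing or B_TRUE to abort it.
 */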
static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

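/*
 * Rx completion event handler: record completion size and flags in the
 * software descriptors covered by the event and advance rxq->pending.
 * An inconsistent event marks the EVQ exceptional so that the queue
 * gets restarted.
 */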
static boolean_t
sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	  uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = evq->rxq;

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart\n",
				evq->evq_index, sfc_rxq_sw_index(rxq),
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;
		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart\n",
			evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
			flags);
		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];
		rxd->flags = flags;
		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

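/* No Tx events are expected; log an error and abort event processing */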
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, __rte_unused uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event", evq->evq_index);

	return B_TRUE;
}

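/*
 * Hardware exception handler: unknown sensor events are ignored; any
 * other exception marks the EVQ exceptional and stops event processing.
 */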
static boolean_t
sfc_ev_exception(void *arg, uint32_t code, uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_err(evq->sa,
		"hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		" needs recovery",
		(code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		(code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		(code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		(code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		(code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		(code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		(code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		(code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		"UNKNOWN",
		code, data, evq->evq_index);

	return B_TRUE;
}

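/*
 * RxQ/TxQ flush notifications below simply forward the outcome to the
 * state machine of the corresponding queue.
 */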
static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_txq *txq;

	txq = evq->txq;
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}

static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);

	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);

	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);

	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);

	return B_TRUE;
}

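/*
 * Link change handler: the cached link status is updated with a single
 * 64-bit atomic write since datapath readers do not take locks.
 */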
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;

	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);
	rte_atomic64_set((rte_atomic64_t *)dev_link, *(uint64_t *)&new_link);

	return B_FALSE;
}

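/* Dispatch table passed to efx_ev_qpoll() */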
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};

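/* Poll the event queue and dispatch events to sfc_ev_callbacks handlers */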
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, &sfc_ev_callbacks, evq);

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

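/*
 * Poll the management EVQ if nobody else is polling it at the moment;
 * the trylock keeps the call non-blocking since it may be invoked from
 * several contexts.
 */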
void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;

		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
			sfc_ev_qpoll(mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

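/* Re-prime the event queue so that the next event triggers a wakeup */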
int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

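/*
 * Create the hardware event queue and wait with exponential backoff
 * until the firmware posts the initialization complete event.
 */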
int
sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	efsys_mem_t *esmp;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;
	esmp = &evq->mem;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff,
		     EFX_EVQ_SIZE(evq_info->entries));

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
			    0 /* unused on EF10 */, 0,
			    EFX_EVQ_FLAGS_TYPE_THROUGHPUT |
			    EFX_EVQ_FLAGS_NOTIFY_DISABLED,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		sfc_ev_qpoll(evq);

		/*
		 * Check if the initialization complete indication has
		 * been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give the event queue some time to init */
		rte_delay_us(delay_us);
		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

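/* Stop the event queue; it is a no-op if the EVQ is not started */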
void
sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->evq_count);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;

	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);
}

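/* Alarm callback: poll the management EVQ and rearm the alarm */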
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc != 0)
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
}

void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

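/* Start the event subsystem: management EVQ first, then periodic polling */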
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), this is required to process link status changes
	 * and other device-level events; otherwise an event queue
	 * overflow would be an unrecoverable error.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_start:
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	/* Make sure that all event queues are stopped */
	sw_index = sa->evq_count;
	while (sw_index-- > 0) {
		if (sw_index == sa->mgmt_evq_index) {
			/* Locks are required for the management EVQ */
			rte_spinlock_lock(&sa->mgmt_evq_lock);
			sfc_ev_qstop(sa, sa->mgmt_evq_index);
			rte_spinlock_unlock(&sa->mgmt_evq_lock);
		} else {
			sfc_ev_qstop(sa, sw_index);
		}
	}

	efx_ev_fini(sa->nic);
}

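/* Allocate an EVQ software context and the DMA memory for its ring */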
int
sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     unsigned int entries, int socket_id)
{
	struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];

	SFC_ASSERT(rte_is_power_of_2(entries));
	SFC_ASSERT(entries <= evq_info->max_entries);
	evq_info->entries = entries;

	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		return ENOMEM;

	evq->sa = sa;
	evq->evq_index = sw_index;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
			   socket_id, &evq->mem);
	if (rc != 0) {
		rte_free(evq);
		return rc;
	}

	evq->init_state = SFC_EVQ_INITIALIZED;

	evq_info->evq = evq;

	return 0;
}

void
sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq = sa->evq_info[sw_index].evq;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sa->evq_info[sw_index].evq = NULL;

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);
}

static int
sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
	unsigned int max_entries;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	max_entries = sfc_evq_max_entries(sa, sw_index);
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	evq_info->max_entries = max_entries;

	return 0;
}

static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to clean up */
}

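/*
 * Attach-time initialization: allocate per-EVQ info and set up the
 * management EVQ used for global events (e.g. link change).
 */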
int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sa->evq_count = sfc_ev_qcount(sa);
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
fail_ev_qinit_info:
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_fini(struct sfc_adapter *sa)
{
	int sw_index;

	sfc_log_init(sa, "entry");

	/* Clean up all event queues */
	sw_index = sa->evq_count;
	while (--sw_index >= 0) {
		if (sa->evq_info[sw_index].evq != NULL)
			sfc_ev_qfini(sa, sw_index);
		sfc_ev_qfini_info(sa, sw_index);
	}

	rte_free(sa->evq_info);
	sa->evq_info = NULL;
	sa->evq_count = 0;
}