/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"

/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Approximate timeout for event queue initialization */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

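/*
 * Worked example of the init backoff schedule (derived purely from the
 * macros above): the poll delay starts at 1 us and doubles after every
 * attempt (1, 2, 4, ... us) until capped at 10 ms, so most of the ~2 s
 * init timeout is spent in 10 ms polling intervals.
 */
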
static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

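/*
 * Rx completion event handler.
 *
 * The event carries the index of the last completed descriptor; the
 * number of newly completed descriptors (delta) is derived from the
 * ring pointers using ptr_mask, which relies on the ring size being
 * a power of 2.
 */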
static boolean_t
sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	  uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = evq->rxq;

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart\n",
				evq->evq_index, sfc_rxq_sw_index(rxq),
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;
		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart\n",
			evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
			flags);
		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

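/*
 * Tx completion event handler: derives the number of completed
 * descriptors from the reported descriptor index with the same
 * wrap-around arithmetic as the Rx handler and advances txq->pending.
 */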
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_txq *txq;
	unsigned int stop;
	unsigned int delta;

	txq = evq->txq;

	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_exception(void *arg, __rte_unused uint32_t code,
		 __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_err(evq->sa,
		"hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		" needs recovery",
		(code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		(code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		(code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		(code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		(code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		(code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		(code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		(code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		"UNKNOWN",
		code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_txq *txq;

	txq = evq->txq;
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}

static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);

	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);

	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);

	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);

	return B_TRUE;
}

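/*
 * Link change event handler. Since struct rte_eth_link fits into a
 * 64-bit word (checked by the static assert below), the new link
 * status can be published to dev_link with a single atomic 64-bit
 * store, so readers always observe a consistent value without a lock.
 */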
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;

	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);
	rte_atomic64_set((rte_atomic64_t *)dev_link, *(uint64_t *)&new_link);

	return B_FALSE;
}

static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_rx,
	.eec_tx = sfc_ev_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_link_change,
};

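/*
 * Poll the event queue: efx_ev_qpoll() parses the events in the DMA
 * ring starting at read_ptr and dispatches each one to the matching
 * sfc_ev_callbacks handler above.
 */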
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, &sfc_ev_callbacks, evq);

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

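/*
 * The trylock below means a caller which loses the race simply skips
 * this poll round instead of blocking; the management EVQ is polled
 * periodically from the alarm anyway, so dropped attempts are harmless.
 */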
void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;

		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
			sfc_ev_qpoll(mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

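/*
 * Start an event queue: clear the DMA ring to all-ones so that
 * hardware-written events can be distinguished from unused entries,
 * create the common code EVQ and then poll with exponential backoff
 * until the hardware posts the init done event or the timeout expires.
 */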
int
sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	efsys_mem_t *esmp;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;
	esmp = &evq->mem;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff,
		     EFX_EVQ_SIZE(evq_info->entries));

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
			    0 /* unused on EF10 */, 0,
			    EFX_EVQ_FLAGS_TYPE_THROUGHPUT |
			    EFX_EVQ_FLAGS_NOTIFY_DISABLED,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete
		 * indication has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->evq_count);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;

	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);
}

static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc != 0)
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

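/*
 * Bring up the event path for the adapter: init the common code event
 * module, start the management EVQ under its lock and arm the periodic
 * poll alarm for it.
 */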
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status change
	 * and other device-level events to avoid an unrecoverable
	 * error due to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_start:
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	/* Make sure that all event queues are stopped */
	sw_index = sa->evq_count;
	while (sw_index-- > 0) {
		if (sw_index == sa->mgmt_evq_index) {
			/* Locks are required for the management EVQ */
			rte_spinlock_lock(&sa->mgmt_evq_lock);
			sfc_ev_qstop(sa, sa->mgmt_evq_index);
			rte_spinlock_unlock(&sa->mgmt_evq_lock);
		} else {
			sfc_ev_qstop(sa, sw_index);
		}
	}

	efx_ev_fini(sa->nic);
}

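/*
 * Allocate and initialize one event queue: the control structure is
 * allocated from the given NUMA socket, together with the DMA ring
 * the hardware writes events to.
 */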
int
sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     unsigned int entries, int socket_id)
{
	struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];

	SFC_ASSERT(rte_is_power_of_2(entries));
	SFC_ASSERT(entries <= evq_info->max_entries);
	evq_info->entries = entries;

	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL) {
		rc = ENOMEM;
		goto fail_evq_alloc;
	}

	evq->sa = sa;
	evq->evq_index = sw_index;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
			   socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	evq_info->evq = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq = sa->evq_info[sw_index].evq;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sa->evq_info[sw_index].evq = NULL;

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);
}

static int
sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
	unsigned int max_entries;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	max_entries = sfc_evq_max_entries(sa, sw_index);
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	evq_info->max_entries = max_entries;

	return 0;
}

static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to cleanup */
}

int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sa->evq_count = sfc_ev_qcount(sa);
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
fail_ev_qinit_info:
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_fini(struct sfc_adapter *sa)
{
	int sw_index;

	sfc_log_init(sa, "entry");

	/* Cleanup all event queues */
	sw_index = sa->evq_count;
	while (--sw_index >= 0) {
		if (sa->evq_info[sw_index].evq != NULL)
			sfc_ev_qfini(sa, sw_index);
		sfc_ev_qfini_info(sa, sw_index);
	}

	rte_free(sa->evq_info);
	sa->evq_info = NULL;
	sa->evq_count = 0;
}