2 * Copyright (c) 2016 Solarflare Communications Inc.
5 * This software was jointly developed between OKTET Labs (under contract
6 * for Solarflare) and Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <rte_errno.h>
42 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
43 size_t len, int socket_id, efsys_mem_t *esmp)
45 const struct rte_memzone *mz;
47 sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
48 name, id, len, socket_id);
50 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
51 sysconf(_SC_PAGESIZE), socket_id);
53 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
54 name, (unsigned int)id, (unsigned int)len, socket_id,
55 rte_strerror(rte_errno));
59 esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
60 if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
61 (void)rte_memzone_free(mz);
66 esmp->esm_base = mz->addr;
72 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
76 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
78 rc = rte_memzone_free(esmp->esm_mz);
80 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
82 memset(esmp, 0, sizeof(*esmp));
86 * Check requested device level configuration.
87 * Receive and transmit configuration is checked in corresponding
/*
 * Validate the device-level configuration requested via
 * rte_eth_dev_configure(); each unsupported option is reported via
 * sfc_err().
 * NOTE(review): this listing elides the local rc variable, the
 * "rc = EINVAL" assignments, the if-blocks' closing braces and the
 * final return — confirm against the complete source.
 */
sfc_check_conf(struct sfc_adapter *sa)
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;

	/* Only link speed/duplex autonegotiation is supported */
	if (conf->link_speeds != ETH_LINK_SPEED_AUTONEG) {
		sfc_err(sa, "Manual link speed/duplex choice not supported");

	/* No loopback mode */
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");

	/* No DCB / priority-based flow control */
	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");

	/* No Flow Director */
	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");

	/* No link status change interrupt */
	if (conf->intr_conf.lsc != 0) {
		sfc_err(sa, "Link status change interrupt not supported");

	/* No per-Rx-queue interrupts */
	if (conf->intr_conf.rxq != 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
130 * Find out maximum number of receive and transmit queues which could be
 * NIC is kept initialized on success to allow other modules to acquire
 * defaults and capabilities.
/*
 * Estimate EVQ/RxQ/TxQ resource limits, push them to libefx and read
 * back what firmware actually granted. On success the NIC is left
 * initialized so other modules can query defaults and capabilities.
 * NOTE(review): this listing elides the local rc declaration, the
 * error checks after efx_nic_init()/efx_nic_get_vi_pool(), the final
 * argument of the get_vi_pool call, the fail labels and the return
 * statements — confirm against the complete source.
 */
sfc_estimate_resource_limits(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */

	/* Failure path (label elided in this listing): undo nic init */
	efx_nic_fini(sa->nic);
212 sfc_set_drv_limits(struct sfc_adapter *sa)
214 const struct rte_eth_dev_data *data = sa->eth_dev->data;
215 efx_drv_limits_t lim;
217 memset(&lim, 0, sizeof(lim));
219 /* Limits are strict since take into account initial estimation */
220 lim.edl_min_evq_count = lim.edl_max_evq_count =
221 1 + data->nb_rx_queues + data->nb_tx_queues;
222 lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
223 lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
225 return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Start the adapter: apply strict driver limits and initialize the NIC.
 * Must be called with the adapter lock held.
 * NOTE(review): this listing elides the local rc variable, the state
 * switch wrapper around the case labels, the error checks, fail labels
 * and return statements — confirm against the complete source.
 */
sfc_start(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* State check (switch statement elided in this listing) */
	case SFC_ADAPTER_CONFIGURED:
	case SFC_ADAPTER_STARTED:
		sfc_info(sa, "already started");

	sa->state = SFC_ADAPTER_STARTING;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");

	/* Failure path: roll the state back and log the error code */
	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Stop a started adapter: tear down the NIC and return to CONFIGURED.
 * Must be called with the adapter lock held.
 * NOTE(review): this listing elides the state switch wrapper and the
 * break/return statements of each case — confirm against the complete
 * source.
 */
sfc_stop(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* State check (switch statement elided in this listing) */
	case SFC_ADAPTER_STARTED:
	case SFC_ADAPTER_CONFIGURED:
		sfc_info(sa, "already stopped");
		/* Unexpected state: report and bail out */
		sfc_err(sa, "stop in unexpected state %u", sa->state);

	sa->state = SFC_ADAPTER_STOPPING;

	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
/*
 * Apply the device configuration: validate it via sfc_check_conf() and
 * transition the adapter INITIALIZED -> CONFIGURED on success.
 * Must be called with the adapter lock held.
 * NOTE(review): this listing elides the local rc variable, the
 * "if (rc != 0)" check, the fail label and the return statements —
 * confirm against the complete source.
 */
sfc_configure(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
		goto fail_check_conf;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");

	/* Failure path: roll the state back and log the error code */
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Close a configured adapter: CONFIGURED -> CLOSING -> INITIALIZED.
 * Must be called with the adapter lock held.
 */
sfc_close(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
/*
 * Locate the first memory BAR of the PCI device and record its mapping
 * in sa->mem_bar.
 * NOTE(review): this listing elides the loop index declaration, the
 * BAR index assignment, the success return inside the loop and the
 * "no BAR found" return path — confirm against the complete source.
 */
sfc_mem_bar_init(struct sfc_adapter *sa)
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res;

	/* Scan PCI resources for the first one that is a memory BAR */
	for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
		res = &pci_dev->mem_resource[i];
		if ((res->len != 0) && (res->phys_addr != 0)) {
			/* Found first memory BAR */
			SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
			ebp->esb_dev = pci_dev;
			ebp->esb_base = res->addr;
364 sfc_mem_bar_fini(struct sfc_adapter *sa)
366 efsys_bar_t *ebp = &sa->mem_bar;
368 SFC_BAR_LOCK_DESTROY(ebp);
369 memset(ebp, 0, sizeof(*ebp));
/*
 * Attach to the device: map the memory BAR, create and probe the NIC,
 * reset it and estimate resource limits. On success the adapter is
 * left in the INITIALIZED state.
 * Must be called with the adapter lock held.
 * NOTE(review): this listing elides the local enp/rc declarations,
 * most "if (rc != 0)" checks, trailing arguments of the efx_family()
 * call, several fail labels and the return statements — confirm
 * against the complete source.
 */
sfc_attach(struct sfc_adapter *sa)
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa);
		goto fail_mem_bar_init;

	/* Determine the controller family from the PCI IDs */
	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
	sfc_log_init(sa, "family is %u", sa->family);

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
		goto fail_nic_create;

	rc = sfc_mcdi_init(sa);

	sfc_log_init(sa, "probe nic");
	rc = efx_nic_probe(enp);

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
		goto fail_estimate_rsrc_limits;

	/* sfc_estimate_resource_limits() left the NIC initialized */
	sfc_log_init(sa, "fini nic");

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");

/* Failure unwind path (some labels elided in this listing) */
fail_estimate_rsrc_limits:
	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_log_init(sa, "destroy nic");
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Detach from the device: unprobe and destroy the NIC, unmap the
 * memory BAR and mark the adapter UNINITIALIZED.
 * Must be called with the adapter lock held.
 * NOTE(review): a few statements between the visible calls are elided
 * in this listing — confirm against the complete source.
 */
sfc_detach(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_log_init(sa, "destroy nic");
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;