/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa,
		   const struct rte_eth_rxconf *rx_conf)
{
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_err(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_free_thresh != 0) {
		sfc_err(sa, "RxQ free threshold is not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_deferred_start != 0) {
		sfc_err(sa, "RxQ deferred start is not supported");
		rc = EINVAL;
	}

	return rc;
}
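
/*
 * Note: as in the rest of this file, failures are reported as positive
 * errno values; the ethdev callbacks which invoke these helpers are
 * expected to negate the value before returning it to the application.
 */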

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint16_t data_off;
	int order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << order;
}
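
/*
 * Illustration (assumed figures, not taken from a real pool): with 64-byte
 * cache lines, sizeof(struct rte_mbuf) == 128, no mbuf private area and the
 * default 128-byte RTE_PKTMBUF_HEADROOM, data_off is 256, so
 * order = MIN(rte_bsf32(64), rte_bsf32(256)) = MIN(6, 8) = 6 and the
 * guaranteed data alignment is 64 bytes. An 8-byte private area would make
 * data_off 264 and reduce the guarantee to 8 bytes.
 */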

static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets Rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If the guaranteed
		 * buffer size is odd, the lost space is (nic_align_end - 1).
		 * A more accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << rte_bsf32(buf_size));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}

		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just make sure that the buffer size is a multiple
		 * of the end padding alignment.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}
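
/*
 * Worked example (alignment figures are illustrative, not real NIC limits):
 * for a pool with a 2176-byte data room and the default 128-byte headroom,
 * buf_size starts at 2048 and buf_aligned is 64 (see
 * sfc_rx_mbuf_data_alignment() above). With an assumed
 * enc_rx_buf_align_start of 1 no start space is reserved; with an assumed
 * enc_rx_buf_align_end of 128, end_padding_alignment is
 * 128 - MIN(64, 1 << rte_bsf32(2048)) = 64, so 2048 - 64 = 1984 bytes are
 * reported as the usable buffer size.
 */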

int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	int rc;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	unsigned int evq_index;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;

	rc = sfc_rx_qcheck_conf(sa, rx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
	rxq_info->entries = nb_rx_desc;
	rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

	evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

	rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
	if (rc != 0)
		goto fail_ev_qinit;

	evq = sa->evq_info[evq_index].evq;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	evq->rxq = rxq;
	rxq->evq = evq;
	rxq->ptr_mask = rxq_info->entries - 1;
	rxq->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;
	rxq->hw_index = sw_index;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->rxq = rxq;

	return 0;

fail_desc_alloc:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
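
/*
 * The fail_* labels above unwind in the reverse order of resource
 * acquisition, so a failure at any step of sfc_rx_qinit() releases exactly
 * what has already been set up and leaves the queue unconfigured.
 */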

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	rte_free(rxq->sw_desc);
	sfc_dma_free(sa, &rxq->mem);
	rte_free(rxq);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}
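
/*
 * The power-of-2 assertion above matters because sfc_rx_qinit() derives
 * ptr_mask as (entries - 1); such a mask only wraps ring indexes correctly
 * when the ring size is a power of 2.
 */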

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	if (rxmode->header_split) {
		sfc_err(sa, "Header split on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		sfc_err(sa, "HW VLAN filtering not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		sfc_err(sa, "HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		sfc_err(sa,
			"Q-in-Q HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		sfc_warn(sa,
			 "FCS stripping control not supported - always stripped");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->enable_scatter) {
		sfc_err(sa, "Scatter on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->enable_lro) {
		sfc_err(sa, "LRO not supported");
		rc = EINVAL;
	}

	return rc;
}
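
/*
 * For illustration (an assumption based on the rte_eth_rxmode bit-fields
 * used above, not a recommendation), an Rx mode accepted by these checks
 * could look like:
 *
 *	static const struct rte_eth_rxmode rxmode_example = {
 *		.mq_mode = ETH_MQ_RX_NONE,
 *		.hw_strip_crc = 1,
 *	};
 *
 * The remaining flags checked above (header split, VLAN offloads, scatter
 * and LRO) must stay zero, otherwise EINVAL is returned.
 */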

/**
 * Initialize Rx subsystem.
 *
 * Called at device configuration stage when the number of receive queues is
 * specified together with other device-level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	unsigned int sw_index;
	int rc;

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

	rc = ENOMEM;
	sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
					 sizeof(struct sfc_rxq_info), 0,
					 sa->socket_id);
	if (sa->rxq_info == NULL)
		goto fail_rxqs_alloc;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		rc = sfc_rx_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_rx_qinit_info;
	}

	return 0;

fail_rx_qinit_info:
	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;

fail_rxqs_alloc:
	sa->rxq_count = 0;

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
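
/*
 * The actual receive queues (sfc_rxq objects, software descriptors and DMA
 * memory) are not allocated here; they are set up later by sfc_rx_qinit(),
 * which receives the socket requested for each individual queue.
 */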

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device reconfiguration
 * or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);
	}

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;
	sa->rxq_count = 0;
}