diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 5f5af602c0..3bc136e040 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2017-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2017-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
@@ -123,14 +123,22 @@ static struct rte_mbuf *
 sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
 			struct rte_mbuf *mbuf)
 {
-	return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+	struct rte_mbuf *m;
+
+	m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+	MBUF_RAW_ALLOC_CHECK(m);
+	return m;
 }
 
 static struct rte_mbuf *
 sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
 			    struct rte_mbuf *mbuf, unsigned int idx)
 {
-	return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+	struct rte_mbuf *m;
+
+	m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+	MBUF_RAW_ALLOC_CHECK(m);
+	return m;
 }
 
 static struct rte_mbuf *
@@ -322,6 +330,12 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
 			const efx_qword_t *qwordp;
 			uint16_t pkt_len;
 
+			/* Buffers to be discarded have 0 in packet type */
+			if (unlikely(m->packet_type == 0)) {
+				rte_mbuf_raw_free(m);
+				goto next_buf;
+			}
+
 			rx_pkts[n_rx_pkts++] = m;
 
 			/* Parse pseudo-header */
@@ -357,6 +371,7 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
 					EFX_QWORD_FIELD(*qwordp,
 							ES_EZ_ESSB_RX_PREFIX_MARK);
 
+next_buf:
 			m = sfc_ef10_essb_next_mbuf(rxq, m);
 		} while (todo_bufs-- > 0);
 	}
@@ -404,21 +419,45 @@ sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
 static unsigned int
-sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
 {
-	/*
-	 * Correct implementation requires EvQ polling and events
-	 * processing.
-	 */
-	return -ENOTSUP;
+	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+	efx_qword_t rx_ev;
+
+	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+				   SFC_EF10_ESSB_RXQ_EXCEPTION)))
+		return rxq->bufs_pending;
+
+	while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+		/*
+		 * DROP_EVENT is internal to the NIC; software should
+		 * never see it and, therefore, may ignore it.
+		 */
+		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+	}
+
+	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+			   evq_old_read_ptr, rxq->evq_read_ptr);
+
+	return rxq->bufs_pending;
 }
 
 static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
 static int
-sfc_ef10_essb_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
-			      __rte_unused uint16_t offset)
+sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
 {
-	return -ENOTSUP;
+	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+	unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);
+
+	if (offset < pending)
+		return RTE_ETH_RX_DESC_DONE;
+
+	if (offset < (rxq->added - rxq->completed) * rxq->block_size +
+		     rxq->left_in_completed - rxq->block_size)
+		return RTE_ETH_RX_DESC_AVAIL;
+
+	return RTE_ETH_RX_DESC_UNAVAIL;
 }
 
 static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
@@ -448,6 +487,7 @@ sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
 static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
 static int
 sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+				struct sfc_dp_rx_hw_limits *limits,
 				struct rte_mempool *mb_pool,
 				unsigned int *rxq_entries,
 				unsigned int *evq_entries,
@@ -474,11 +514,11 @@ sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
 	nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
 						 mp_info.contig_block_size),
 				SFC_EF10_RX_WPTR_ALIGN + 1);
-	if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
-		*rxq_entries = EFX_RXQ_MINNDESCS;
+	if (nb_hw_rx_desc <= limits->rxq_min_entries) {
+		*rxq_entries = limits->rxq_min_entries;
 	} else {
 		*rxq_entries = rte_align32pow2(nb_hw_rx_desc);
-		if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+		if (*rxq_entries > limits->rxq_max_entries)
 			return EINVAL;
 	}
 
@@ -488,8 +528,8 @@ sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
 			1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
 
 	*evq_entries = rte_align32pow2(max_events);
-	*evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
-	*evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
+	*evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
+	*evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);
 
 	/*
 	 * May be even maximum event queue size is insufficient to handle
@@ -647,29 +687,20 @@ static void
 sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
 {
 	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
-	unsigned int i, j;
+	unsigned int i;
 	const struct sfc_ef10_essb_rx_sw_desc *rxd;
 	struct rte_mbuf *m;
 
-	if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
-		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
-		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
-				rxq->block_size - rxq->left_in_completed);
-		do {
-			rxq->left_in_completed--;
-			rte_mempool_put(rxq->refill_mb_pool, m);
-			m = sfc_ef10_essb_next_mbuf(rxq, m);
-		} while (rxq->left_in_completed > 0);
-		rxq->completed++;
-	}
-
 	for (i = rxq->completed; i != rxq->added; ++i) {
 		rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
-		m = rxd->first_mbuf;
-		for (j = 0; j < rxq->block_size; ++j) {
-			rte_mempool_put(rxq->refill_mb_pool, m);
+		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+				rxq->block_size - rxq->left_in_completed);
+		while (rxq->left_in_completed > 0) {
+			rte_mbuf_raw_free(m);
 			m = sfc_ef10_essb_next_mbuf(rxq, m);
+			rxq->left_in_completed--;
 		}
+		rxq->left_in_completed = rxq->block_size;
 	}
 
 	rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
@@ -684,6 +715,9 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
 				  SFC_DP_RX_FEAT_FLOW_MARK,
+	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
+				  DEV_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= 0,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
 	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
 	.qsize_up_rings		= sfc_ef10_essb_rx_qsize_up_rings,
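
The AVAIL bound in the new sfc_ef10_essb_rx_qdesc_status() packs the whole
equal-stride ring accounting into one expression: blocks completed..added-1
hold (added - completed) * block_size buffers, of which
block_size - left_in_completed buffers of the oldest block were already
handed to the application. The standalone sketch below reproduces that
classification in isolation; the essb_rxq_model struct, the sample values
and the string results are illustrative stand-ins for the driver's real
types and the RTE_ETH_RX_DESC_* codes, not driver code.

#include <stdio.h>

/* Illustrative stand-in for the Rx queue state consulted by
 * sfc_ef10_essb_rx_qdesc_status(); not the driver's structure.
 */
struct essb_rxq_model {
	unsigned int added;		/* blocks posted to the NIC so far */
	unsigned int completed;		/* index of the oldest live block */
	unsigned int block_size;	/* equal-stride buffers per block */
	unsigned int left_in_completed; /* buffers not yet handed out of it */
};

static const char *
essb_qdesc_status(const struct essb_rxq_model *q, unsigned int offset,
		  unsigned int pending)
{
	/* Received and waiting to be returned by the burst function */
	if (offset < pending)
		return "DONE";

	/* Same bound as the patch: all buffers of outstanding blocks
	 * minus those of the oldest block already consumed.
	 */
	if (offset < (q->added - q->completed) * q->block_size +
		     q->left_in_completed - q->block_size)
		return "AVAIL";

	/* Beyond everything posted to the NIC */
	return "UNAVAIL";
}

int
main(void)
{
	/* Four outstanding blocks of 32 buffers, oldest block untouched */
	struct essb_rxq_model q = {
		.added = 10, .completed = 6,
		.block_size = 32, .left_in_completed = 32,
	};
	unsigned int pending = 5; /* as the npending callback would report */
	unsigned int offsets[] = { 0, 4, 5, 127, 128, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("offset %3u -> %s\n", offsets[i],
		       essb_qdesc_status(&q, offsets[i], pending));
	return 0;
}

With these sample numbers the AVAIL bound evaluates to
(10 - 6) * 32 + 32 - 32 = 128, so offsets 0-4 print DONE, offsets 5-127
print AVAIL, and anything from 128 up prints UNAVAIL, which is the
per-offset answer the replaced -ENOTSUP stubs could not provide.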