X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ef10_essb_rx.c;h=81c8f7fbd2e0a12f0eecbed3427e779f9125dfa0;hb=a77a72cdd7c2856290d5d3b50b38486a1b89ee96;hp=1df61ff39ba2c52fa129bb823d1407367e8d7ab5;hpb=390f9b8d82c9bb02b12efed1a810677082de2687;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 1df61ff39b..81c8f7fbd2 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -42,6 +42,31 @@
  */
 #define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32
 
+/**
+ * Minimum number of Rx buffers the datapath allows to use.
+ *
+ * Each HW Rx descriptor has many Rx buffers. The number of buffers
+ * in one HW Rx descriptor is equal to size of contiguous block
+ * provided by Rx buffers memory pool. The contiguous block size
+ * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and rte_mbuf
+ * data size specified on the memory pool creation. Typical rte_mbuf
+ * data size is about 2k which makes a bit less than 32 buffers in
+ * contiguous block with default bucket size equal to 64k.
+ * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
+ * it makes about 256 as required minimum. Double it in advertised
+ * minimum to allow for at least 2 refill blocks.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_MIN	512
+
+/**
+ * Number of Rx buffers should be aligned to.
+ *
+ * There are no extra requirements on alignment since actual number of
+ * pushed Rx buffers will be multiple by contiguous block size which
+ * is unknown beforehand.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_ALIGN	1
+
 /**
  * Maximum number of descriptors/buffers in the Rx ring.
  * It should guarantee that corresponding event queue never overfill.
@@ -297,6 +322,12 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
 			const efx_qword_t *qwordp;
 			uint16_t pkt_len;
 
+			/* Buffers to be discarded have 0 in packet type */
+			if (unlikely(m->packet_type == 0)) {
+				rte_mempool_put(rxq->refill_mb_pool, m);
+				goto next_buf;
+			}
+
 			rx_pkts[n_rx_pkts++] = m;
 
 			/* Parse pseudo-header */
@@ -316,13 +347,23 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
 			m->ol_flags |=
 				(PKT_RX_RSS_HASH *
 				 !!EFX_TEST_QWORD_BIT(*qwordp,
-					ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN));
+					ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
+				(PKT_RX_FDIR_ID *
+				 !!EFX_TEST_QWORD_BIT(*qwordp,
+					ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
+				(PKT_RX_FDIR *
+				 !!EFX_TEST_QWORD_BIT(*qwordp,
+					ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
 
 			/* EFX_QWORD_FIELD converts little-endian to CPU */
 			m->hash.rss = EFX_QWORD_FIELD(*qwordp,
 						      ES_EZ_ESSB_RX_PREFIX_HASH);
+			m->hash.fdir.hi =
+				EFX_QWORD_FIELD(*qwordp,
+						ES_EZ_ESSB_RX_PREFIX_MARK);
 
+next_buf:
 			m = sfc_ef10_essb_next_mbuf(rxq, m);
 		} while (todo_bufs-- > 0);
 	}
 
@@ -370,13 +411,45 @@ sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
 static unsigned int
-sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
 {
-	/*
-	 * Correct implementation requires EvQ polling and events
-	 * processing.
-	 */
-	return -ENOTSUP;
+	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+	efx_qword_t rx_ev;
+
+	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+				   SFC_EF10_ESSB_RXQ_EXCEPTION)))
+		return rxq->bufs_pending;
+
+	while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+		/*
+		 * DROP_EVENT is an internal to the NIC, software should
+		 * never see it and, therefore, may ignore it.
+		 */
+		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+	}
+
+	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+			   evq_old_read_ptr, rxq->evq_read_ptr);
+
+	return rxq->bufs_pending;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
+static int
+sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+	unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);
+
+	if (offset < pending)
+		return RTE_ETH_RX_DESC_DONE;
+
+	if (offset < (rxq->added - rxq->completed) * rxq->block_size +
+		     rxq->left_in_completed - rxq->block_size)
+		return RTE_ETH_RX_DESC_AVAIL;
+
+	return RTE_ETH_RX_DESC_UNAVAIL;
 }
 
 static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
@@ -387,8 +460,20 @@ sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
 	 * Number of descriptors just defines maximum number of pushed
 	 * descriptors (fill level).
 	 */
-	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
-	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
+	dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
+	dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
+}
+
+static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
+static int
+sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
+{
+	SFC_ASSERT(pool != NULL);
+
+	if (strcmp(pool, "bucket") == 0)
+		return 0;
+
+	return -ENOTSUP;
 }
 
 static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
@@ -593,29 +678,20 @@ static void
 sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
 {
 	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
-	unsigned int i, j;
+	unsigned int i;
 	const struct sfc_ef10_essb_rx_sw_desc *rxd;
 	struct rte_mbuf *m;
 
-	if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
-		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
-		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
-				rxq->block_size - rxq->left_in_completed);
-		do {
-			rxq->left_in_completed--;
-			rte_mempool_put(rxq->refill_mb_pool, m);
-			m = sfc_ef10_essb_next_mbuf(rxq, m);
-		} while (rxq->left_in_completed > 0);
-		rxq->completed++;
-	}
-
 	for (i = rxq->completed; i != rxq->added; ++i) {
 		rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
-		m = rxd->first_mbuf;
-		for (j = 0; j < rxq->block_size; ++j) {
+		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+				rxq->block_size - rxq->left_in_completed);
+		while (rxq->left_in_completed > 0) {
 			rte_mempool_put(rxq->refill_mb_pool, m);
 			m = sfc_ef10_essb_next_mbuf(rxq, m);
+			rxq->left_in_completed--;
 		}
+		rxq->left_in_completed = rxq->block_size;
 	}
 
 	rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
@@ -628,8 +704,11 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10 |
 				  SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
 	},
-	.features		= 0,
+	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
+				  SFC_DP_RX_FEAT_FLOW_MARK |
+				  SFC_DP_RX_FEAT_CHECKSUM,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
+	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
 	.qsize_up_rings		= sfc_ef10_essb_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_essb_rx_qcreate,
 	.qdestroy		= sfc_ef10_essb_rx_qdestroy,
@@ -639,5 +718,6 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	.qpurge			= sfc_ef10_essb_rx_qpurge,
 	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
 	.qdesc_npending		= sfc_ef10_essb_rx_qdesc_npending,
+	.qdesc_status		= sfc_ef10_essb_rx_qdesc_status,
 	.pkt_burst		= sfc_ef10_essb_recv_pkts,
 };
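
Note on the new SFC_EF10_ESSB_RX_DESCS_MIN limit: the standalone sketch below is not part of the patch; it only reproduces the arithmetic from the comment added above, under idealized assumptions (exactly 2 KB of mbuf data per buffer and the default 64 KB bucket). With those values it prints 32 buffers per contiguous block, 256 buffers per minimal push of 8 HW descriptors and 512 after doubling; a real mempool with "about 2k" mbufs yields slightly fewer buffers per block, which is why the patch comment says "about 256" before rounding the advertised minimum up to 512.

#include <stdio.h>

/* Assumed values mirroring the patch comment, not taken from DPDK headers. */
#define BUCKET_SIZE_BYTES	(64 * 1024)	/* default CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB */
#define MBUF_DATA_SIZE		2048		/* idealized "about 2k" mbuf data size */
#define WPTR_ALIGN		8		/* descriptors are pushed by 8, see SFC_EF10_RX_WPTR_ALIGN */

int
main(void)
{
	/* Buffers in one contiguous block, i.e. per HW Rx descriptor. */
	unsigned int bufs_per_block = BUCKET_SIZE_BYTES / MBUF_DATA_SIZE;
	/* One minimal push of 8 HW descriptors consumes this many buffers. */
	unsigned int bufs_per_push = WPTR_ALIGN * bufs_per_block;
	/* Advertise double that so at least two refill blocks fit. */
	unsigned int nb_min = 2 * bufs_per_push;

	printf("buffers per contiguous block: %u\n", bufs_per_block);
	printf("buffers per minimal push:     %u\n", bufs_per_push);
	printf("advertised nb_min:            %u\n", nb_min);
	return 0;
}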
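
Since the datapath now reports "bucket" as the only supported mempool ops and wires qdesc_status into the ethdev descriptor status API, an application-side view could look like the fragment below. This is a hedged sketch rather than code from the patch: the pool name, sizes, port/queue handling and the reduced error handling are illustrative assumptions; only rte_pktmbuf_pool_create_by_ops(), rte_eth_rx_descriptor_status() and the RTE_ETH_RX_DESC_* values are existing DPDK interfaces.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/*
 * Sketch: create an mbuf pool with the "bucket" ops (the only ops the
 * EF10 ESSB datapath accepts via pool_ops_supported) and query the
 * state of one Rx descriptor offset, which ends up in the new
 * sfc_ef10_essb_rx_qdesc_status() callback.
 */
static int
essb_rx_example(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mempool *mp;
	int status;

	mp = rte_pktmbuf_pool_create_by_ops("essb_rx_pool", 8192, 256, 0,
					    RTE_MBUF_DEFAULT_BUF_SIZE,
					    rte_eth_dev_socket_id(port_id),
					    "bucket");
	if (mp == NULL)
		return -1;

	/* ... rte_eth_rx_queue_setup(port_id, queue_id, ..., mp) etc. ... */

	status = rte_eth_rx_descriptor_status(port_id, queue_id, 0);
	switch (status) {
	case RTE_ETH_RX_DESC_DONE:
		/* A received buffer is pending at this offset. */
		break;
	case RTE_ETH_RX_DESC_AVAIL:
		/* The buffer at this offset is posted to HW, not filled yet. */
		break;
	case RTE_ETH_RX_DESC_UNAVAIL:
	default:
		/* Offset is beyond the buffers currently held by the queue. */
		break;
	}

	return status;
}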