diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 5c0152d01f..f2fc6e70a1 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -56,13 +56,17 @@ struct sfc_ef10_rxq {
 #define SFC_EF10_RXQ_NOT_RUNNING        0x2
 #define SFC_EF10_RXQ_EXCEPTION          0x4
 #define SFC_EF10_RXQ_RSS_HASH           0x8
+#define SFC_EF10_RXQ_FLAG_INTR_EN       0x10
         unsigned int                    ptr_mask;
-        unsigned int                    prepared;
+        unsigned int                    pending;
         unsigned int                    completed;
         unsigned int                    evq_read_ptr;
+        unsigned int                    evq_read_ptr_primed;
         efx_qword_t                     *evq_hw_ring;
         struct sfc_ef10_rx_sw_desc      *sw_ring;
         uint64_t                        rearm_data;
+        struct rte_mbuf                 *scatter_pkt;
+        volatile void                   *evq_prime;
         uint16_t                        prefix_size;
 
         /* Used on refill */
@@ -84,6 +88,13 @@ sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
         return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
 }
 
+static void
+sfc_ef10_rx_qprime(struct sfc_ef10_rxq *rxq)
+{
+        sfc_ef10_ev_qprime(rxq->evq_prime, rxq->evq_read_ptr, rxq->ptr_mask);
+        rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
+}
+
 static void
 sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
 {
@@ -133,6 +144,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
                 struct sfc_ef10_rx_sw_desc *rxd;
                 rte_iova_t phys_addr;
 
+                MBUF_RAW_ALLOC_CHECK(m);
+
                 SFC_ASSERT((id & ~ptr_mask) == 0);
                 rxd = &rxq->sw_ring[id];
                 rxd->mbuf = m;
@@ -184,21 +197,26 @@ sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
         }
 }
 
-static uint16_t
-sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
-                     uint16_t nb_pkts)
+static struct rte_mbuf **
+sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
+                    uint16_t nb_pkts)
 {
-        uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->prepared);
-        unsigned int completed = rxq->completed;
-        unsigned int i;
+        uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);
+
+        SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL);
 
-        rxq->prepared -= n_rx_pkts;
-        rxq->completed = completed + n_rx_pkts;
+        if (n_rx_pkts != 0) {
+                unsigned int completed = rxq->completed;
 
-        for (i = 0; i < n_rx_pkts; ++i, ++completed)
-                rx_pkts[i] = rxq->sw_ring[completed & rxq->ptr_mask].mbuf;
+                rxq->completed = completed + n_rx_pkts;
 
-        return n_rx_pkts;
+                do {
+                        *rx_pkts++ =
+                                rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf;
+                } while (completed != rxq->completed);
+        }
+
+        return rx_pkts;
 }
 
 static uint16_t
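The hunks above replace the lone "prepared" count with a "pending" index, giving the queue three free-running indices with completed <= pending <= added: descriptors handed to hardware, descriptors known to be done, and descriptors already delivered to the application. The indices are reduced modulo the power-of-two ring size only where sw_ring is actually dereferenced. A minimal standalone model of that scheme (plain C, not DPDK code; the names mirror the driver but the types and demo values are illustrative):

/*
 * Free-running ring indices: completed <= pending <= added, masked
 * only at access time.  RING_SIZE must be a power of two.
 */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8
#define PTR_MASK (RING_SIZE - 1)

struct ring {
        int sw_ring[RING_SIZE];
        unsigned int added;     /* descriptors given to "hardware" */
        unsigned int pending;   /* reported done, not yet delivered */
        unsigned int completed; /* delivered to the application */
};

/* Deliver up to nb entries from [completed, pending), like rx_pending(). */
static unsigned int ring_take(struct ring *r, int *out, unsigned int nb)
{
        unsigned int n = r->pending - r->completed;

        if (nb < n)
                n = nb;
        for (unsigned int i = 0; i < n; i++)
                out[i] = r->sw_ring[r->completed++ & PTR_MASK];
        return n;
}

int main(void)
{
        struct ring r = { .added = 0, .pending = 0, .completed = 0 };
        int out[RING_SIZE];

        for (int i = 0; i < 5; i++)             /* refill: post 5 slots */
                r.sw_ring[r.added++ & PTR_MASK] = 100 + i;
        r.pending += 3;                         /* an event reports 3 done */

        unsigned int n = ring_take(&r, out, RING_SIZE);
        printf("took %u entries, first %d\n", n, out[0]);
        assert(n == 3 && r.completed == r.pending);
        return 0;
}

Keeping the indices free-running means wrap-around needs no special casing: unsigned subtractions such as pending - completed stay correct across the 2^32 boundary.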
@@ -213,47 +231,80 @@ sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
         return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
 }
 
-static uint16_t
+static struct rte_mbuf **
 sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
-                          struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+                          struct rte_mbuf **rx_pkts,
+                          struct rte_mbuf ** const rx_pkts_end)
 {
         const unsigned int ptr_mask = rxq->ptr_mask;
-        unsigned int completed = rxq->completed;
+        unsigned int pending = rxq->pending;
         unsigned int ready;
         struct sfc_ef10_rx_sw_desc *rxd;
         struct rte_mbuf *m;
         struct rte_mbuf *m0;
-        uint16_t n_rx_pkts;
         const uint8_t *pseudo_hdr;
-        uint16_t pkt_len;
+        uint16_t seg_len;
 
-        ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - completed) &
+        ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
                 EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
-        SFC_ASSERT(ready > 0);
+
+        if (ready == 0) {
+                /* Rx abort - not enough descriptors for the Rx packet */
+                rte_pktmbuf_free(rxq->scatter_pkt);
+                rxq->scatter_pkt = NULL;
+                return rx_pkts;
+        }
+
+        rxq->pending = pending + ready;
 
         if (rx_ev.eq_u64[0] &
             rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
                              (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
-                SFC_ASSERT(rxq->prepared == 0);
-                rxq->completed += ready;
-                while (ready-- > 0) {
-                        rxd = &rxq->sw_ring[completed++ & ptr_mask];
-                        rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
-                }
-                return 0;
+                SFC_ASSERT(rxq->completed == pending);
+                do {
+                        rxd = &rxq->sw_ring[pending++ & ptr_mask];
+                        rte_mbuf_raw_free(rxd->mbuf);
+                } while (pending != rxq->pending);
+                rxq->completed = pending;
+                return rx_pkts;
         }
 
-        n_rx_pkts = RTE_MIN(ready, nb_pkts);
-        rxq->prepared = ready - n_rx_pkts;
-        rxq->completed += n_rx_pkts;
+        /* If a scattered packet is in progress */
+        if (rxq->scatter_pkt != NULL) {
+                /* Events for scattered packet frags are not merged */
+                SFC_ASSERT(ready == 1);
+                SFC_ASSERT(rxq->completed == pending);
 
-        rxd = &rxq->sw_ring[completed++ & ptr_mask];
+                /* There is no pseudo-header in scatter segments. */
+                seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES);
 
-        sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+                rxd = &rxq->sw_ring[pending++ & ptr_mask];
+                m = rxd->mbuf;
 
-        m = rxd->mbuf;
+                MBUF_RAW_ALLOC_CHECK(m);
+
+                m->data_off = RTE_PKTMBUF_HEADROOM;
+                rte_pktmbuf_data_len(m) = seg_len;
+                rte_pktmbuf_pkt_len(m) = seg_len;
+
+                rxq->scatter_pkt->nb_segs++;
+                rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len;
+                rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m;
+
+                if (~rx_ev.eq_u64[0] &
+                    rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
+                        *rx_pkts++ = rxq->scatter_pkt;
+                        rxq->scatter_pkt = NULL;
+                }
+                rxq->completed = pending;
+                return rx_pkts;
+        }
 
-        *rx_pkts++ = m;
+        rxd = &rxq->sw_ring[pending++ & ptr_mask];
+
+        sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
+
+        m = rxd->mbuf;
 
         RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
         m->rearm_data[0] = rxq->rearm_data;
@@ -275,27 +326,40 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
         m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
 
         if (ready == 1)
-                pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
+                seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
                         rxq->prefix_size;
         else
-                pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
-        SFC_ASSERT(pkt_len > 0);
-        rte_pktmbuf_data_len(m) = pkt_len;
-        rte_pktmbuf_pkt_len(m) = pkt_len;
+                seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+        SFC_ASSERT(seg_len > 0);
+        rte_pktmbuf_data_len(m) = seg_len;
+        rte_pktmbuf_pkt_len(m) = seg_len;
 
         SFC_ASSERT(m->next == NULL);
 
+        if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
+                *rx_pkts++ = m;
+                rxq->completed = pending;
+        } else {
+                /* Events with CONT bit are not merged */
+                SFC_ASSERT(ready == 1);
+                rxq->scatter_pkt = m;
+                rxq->completed = pending;
+                return rx_pkts;
+        }
+
         /* Remember mbuf to copy offload flags and packet type from */
         m0 = m;
 
-        for (--ready; ready > 0; --ready) {
-                rxd = &rxq->sw_ring[completed++ & ptr_mask];
+        while (pending != rxq->pending) {
+                rxd = &rxq->sw_ring[pending++ & ptr_mask];
 
-                sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+                sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
 
                 m = rxd->mbuf;
 
-                if (ready > rxq->prepared)
+                if (rx_pkts != rx_pkts_end) {
                         *rx_pkts++ = m;
+                        rxq->completed = pending;
+                }
 
                 RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
                                  sizeof(rxq->rearm_data));
@@ -315,15 +379,15 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
                  */
                 m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
 
-                pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
-                SFC_ASSERT(pkt_len > 0);
-                rte_pktmbuf_data_len(m) = pkt_len;
-                rte_pktmbuf_pkt_len(m) = pkt_len;
+                seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+                SFC_ASSERT(seg_len > 0);
+                rte_pktmbuf_data_len(m) = seg_len;
+                rte_pktmbuf_pkt_len(m) = seg_len;
 
                 SFC_ASSERT(m->next == NULL);
         }
 
-        return n_rx_pkts;
+        return rx_pkts;
 }
 
 static bool
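The scatter support above parks an in-progress packet in rxq->scatter_pkt and appends exactly one segment per event until the ESF_DZ_RX_CONT bit is clear; a later event with ready == 0 signals an Rx abort and the partial chain is freed. A plain-C analogue of the append-until-EOP logic (struct seg stands in for rte_mbuf; field names and input lengths are illustrative only, and cleanup is omitted):

/* Chain segments arriving one per event until the CONT flag clears. */
#include <stdio.h>
#include <stdlib.h>

struct seg {
        unsigned int seg_len;
        unsigned int pkt_len;   /* valid in the first segment only */
        unsigned int nb_segs;   /* valid in the first segment only */
        struct seg *next;
};

static struct seg *scatter_pkt; /* packet being assembled, like rxq->scatter_pkt */

/* Returns the completed packet, or NULL while more segments are expected. */
static struct seg *rx_one_event(struct seg *m, int cont)
{
        if (scatter_pkt == NULL) {
                scatter_pkt = m;        /* first segment starts the packet */
        } else {
                struct seg *last = scatter_pkt;

                while (last->next != NULL)      /* like rte_pktmbuf_lastseg() */
                        last = last->next;
                last->next = m;
                scatter_pkt->nb_segs++;
                scatter_pkt->pkt_len += m->seg_len;
        }
        if (cont)
                return NULL;            /* more segments to come */
        struct seg *pkt = scatter_pkt;
        scatter_pkt = NULL;
        return pkt;
}

int main(void)
{
        unsigned int lens[] = { 1500, 1500, 142 };
        struct seg *pkt = NULL;

        for (int i = 0; i < 3; i++) {
                struct seg *m = calloc(1, sizeof(*m));
                m->seg_len = lens[i];
                m->pkt_len = lens[i];
                m->nb_segs = 1;
                pkt = rx_one_event(m, i < 2);   /* CONT set on all but the last */
        }
        printf("pkt_len=%u nb_segs=%u\n", pkt->pkt_len, pkt->nb_segs);
        return 0;
}

As the diff shows, the driver also keeps rxq->completed in step with pending after every segment event, so an abort never hands a half-built chain to the application.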
@@ -355,26 +419,25 @@ static uint16_t
 sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
         struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
+        struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
         unsigned int evq_old_read_ptr;
-        uint16_t n_rx_pkts;
         efx_qword_t rx_ev;
 
-        n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+        rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts);
 
         if (unlikely(rxq->flags &
                      (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
                 goto done;
 
         evq_old_read_ptr = rxq->evq_read_ptr;
-        while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+        while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
                 /*
                  * DROP_EVENT is internal to the NIC, software should
                  * never see it and, therefore, may ignore it.
                  */
-                n_rx_pkts += sfc_ef10_rx_process_event(rxq, rx_ev,
-                                                       rx_pkts + n_rx_pkts,
-                                                       nb_pkts - n_rx_pkts);
+                rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev,
+                                                    rx_pkts, rx_pkts_end);
         }
 
         sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
@@ -383,8 +446,12 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
         /* It is not a problem if we refill in the case of exception */
         sfc_ef10_rx_qrefill(rxq);
 
+        if ((rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) &&
+            rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
+                sfc_ef10_rx_qprime(rxq);
+
 done:
-        return n_rx_pkts;
+        return nb_pkts - (rx_pkts_end - rx_pkts);
 }
 
 const uint32_t *
@@ -447,21 +514,53 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
 static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
 static unsigned int
-sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
 {
+        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+        efx_qword_t rx_ev;
+        const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+        unsigned int pending = rxq->pending;
+        unsigned int ready;
+
+        if (unlikely(rxq->flags &
+                     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
+                goto done;
+
+        while (sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+                ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
+                         pending) &
+                        EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+                pending += ready;
+        }
+
         /*
-         * Correct implementation requires EvQ polling and events
-         * processing (keeping all ready mbufs in prepared).
+         * The function does not process events, so return the event queue
+         * read pointer to its original position to allow the events that
+         * were read to be processed later.
          */
-        return -ENOTSUP;
+        rxq->evq_read_ptr = evq_old_read_ptr;
+
+done:
+        return pending - rxq->completed;
 }
 
 static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
 static int
-sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
-                         __rte_unused uint16_t offset)
+sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
 {
-        return -ENOTSUP;
+        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+        unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq);
+
+        if (unlikely(offset > rxq->ptr_mask))
+                return -EINVAL;
+
+        if (offset < npending)
+                return RTE_ETH_RX_DESC_DONE;
+
+        if (offset < (rxq->added - rxq->completed))
+                return RTE_ETH_RX_DESC_AVAIL;
+
+        return RTE_ETH_RX_DESC_UNAVAIL;
 }
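With the counter trio in place, qdesc_npending() can peek at the event queue and then rewind evq_read_ptr so nothing is consumed, and qdesc_status() classifies an offset from the last completed descriptor against those counters. A standalone model of that classification (the enum stands in for the RTE_ETH_RX_DESC_* values; the counter values in main() are made up):

/* Classify a descriptor offset as DONE / AVAIL / UNAVAIL. */
#include <stdio.h>

enum desc_status { DESC_DONE, DESC_AVAIL, DESC_UNAVAIL, DESC_EINVAL };

static enum desc_status
qdesc_status(unsigned int offset, unsigned int ptr_mask,
             unsigned int completed, unsigned int pending,
             unsigned int added)
{
        unsigned int npending = pending - completed;

        if (offset > ptr_mask)
                return DESC_EINVAL;     /* beyond the ring */
        if (offset < npending)
                return DESC_DONE;       /* an event already reported it */
        if (offset < added - completed)
                return DESC_AVAIL;      /* posted to hardware, not done yet */
        return DESC_UNAVAIL;            /* not posted at all */
}

int main(void)
{
        /* completed=10, pending=13, added=20 on a 512-entry ring */
        static const char *names[] = { "DONE", "AVAIL", "UNAVAIL", "EINVAL" };
        unsigned int offsets[] = { 0, 2, 3, 9, 10, 600 };

        for (unsigned int i = 0; i < 6; i++)
                printf("offset %u -> %s\n", offsets[i],
                       names[qdesc_status(offsets[i], 511, 10, 13, 20)]);
        return 0;
}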
@@ -481,6 +580,7 @@ sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
 static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
 static int
 sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
+                           struct sfc_dp_rx_hw_limits *limits,
                            __rte_unused struct rte_mempool *mb_pool,
                            unsigned int *rxq_entries,
                            unsigned int *evq_entries,
@@ -490,8 +590,8 @@ sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
          * rte_ethdev API guarantees that the number meets min, max and
          * alignment requirements.
          */
-        if (nb_rx_desc <= EFX_RXQ_MINNDESCS)
-                *rxq_entries = EFX_RXQ_MINNDESCS;
+        if (nb_rx_desc <= limits->rxq_min_entries)
+                *rxq_entries = limits->rxq_min_entries;
         else
                 *rxq_entries = rte_align32pow2(nb_rx_desc);
 
@@ -567,6 +667,9 @@ sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
         rxq->doorbell = (volatile uint8_t *)info->mem_bar +
                         ER_DZ_RX_DESC_UPD_REG_OFST +
                         (info->hw_index << info->vi_window_shift);
+        rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
+                         ER_DZ_EVQ_RPTR_REG_OFST +
+                         (info->evq_hw_index << info->vi_window_shift);
 
         *dp_rxqp = &rxq->dp;
         return 0;
@@ -595,8 +698,8 @@ sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
 {
         struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
 
-        SFC_ASSERT(rxq->prepared == 0);
         SFC_ASSERT(rxq->completed == 0);
+        SFC_ASSERT(rxq->pending == 0);
         SFC_ASSERT(rxq->added == 0);
 
         sfc_ef10_rx_qrefill(rxq);
@@ -606,6 +709,9 @@ sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
         rxq->flags |= SFC_EF10_RXQ_STARTED;
         rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);
 
+        if (rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN)
+                sfc_ef10_rx_qprime(rxq);
+
         return 0;
 }
 
@@ -644,19 +750,43 @@ sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
         unsigned int i;
         struct sfc_ef10_rx_sw_desc *rxd;
 
-        rxq->prepared = 0;
+        rte_pktmbuf_free(rxq->scatter_pkt);
+        rxq->scatter_pkt = NULL;
 
         for (i = rxq->completed; i != rxq->added; ++i) {
                 rxd = &rxq->sw_ring[i & rxq->ptr_mask];
-                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+                rte_mbuf_raw_free(rxd->mbuf);
                 rxd->mbuf = NULL;
         }
 
-        rxq->completed = rxq->added = 0;
+        rxq->completed = rxq->pending = rxq->added = 0;
 
         rxq->flags &= ~SFC_EF10_RXQ_STARTED;
 }
 
+static sfc_dp_rx_intr_enable_t sfc_ef10_rx_intr_enable;
+static int
+sfc_ef10_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
+{
+        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+        rxq->flags |= SFC_EF10_RXQ_FLAG_INTR_EN;
+        if (rxq->flags & SFC_EF10_RXQ_STARTED)
+                sfc_ef10_rx_qprime(rxq);
+        return 0;
+}
+
+static sfc_dp_rx_intr_disable_t sfc_ef10_rx_intr_disable;
+static int
+sfc_ef10_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
+{
+        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+        /* Cannot disarm, just disable rearm */
+        rxq->flags &= ~SFC_EF10_RXQ_FLAG_INTR_EN;
+        return 0;
+}
+
 struct sfc_dp_rx sfc_ef10_rx = {
         .dp = {
                 .name           = SFC_KVARG_DATAPATH_EF10,
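The interrupt plumbing above rests on sfc_ef10_rx_qprime(): writing the current event-queue read pointer to the EVQ RPTR doorbell rearms the event interrupt, and evq_read_ptr_primed remembers the last value written so the hot path re-primes only when the pointer has actually advanced. A rough model of that bookkeeping (the MMIO doorbell is a plain store into a local variable here; the real register layout and the sfc_ef10_ev_qprime() encoding are not reproduced):

/* Rearm bookkeeping: prime only when the read pointer has moved. */
#include <stdint.h>
#include <stdio.h>

struct evq {
        unsigned int read_ptr;          /* software position, free-running */
        unsigned int read_ptr_primed;   /* last value written to "hardware" */
        unsigned int ptr_mask;
        volatile uint32_t *prime_reg;   /* would be an MMIO doorbell */
        int intr_en;
};

static void evq_prime(struct evq *evq)
{
        /* Hardware wants the masked ring index, not the raw counter. */
        *evq->prime_reg = evq->read_ptr & evq->ptr_mask;
        evq->read_ptr_primed = evq->read_ptr;
}

/* Called at the end of a poll, like the tail of sfc_ef10_recv_pkts(). */
static void maybe_prime(struct evq *evq)
{
        if (evq->intr_en && evq->read_ptr_primed != evq->read_ptr)
                evq_prime(evq);
}

int main(void)
{
        uint32_t fake_reg = 0;
        struct evq evq = { .ptr_mask = 1023, .prime_reg = &fake_reg,
                           .intr_en = 1 };

        evq.read_ptr += 5;      /* five events consumed this poll */
        maybe_prime(&evq);      /* writes 5, arms the next interrupt */
        maybe_prime(&evq);      /* no-op: pointer did not move */
        printf("doorbell=%u primed=%u\n", (unsigned)fake_reg,
               evq.read_ptr_primed);
        return 0;
}

This is also why intr_disable() merely clears the flag: a doorbell that has already been written cannot be taken back, which matches the "Cannot disarm, just disable rearm" comment above.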
@@ -664,8 +794,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
                 .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF10,
         },
         .features               = SFC_DP_RX_FEAT_MULTI_PROCESS |
-                                  SFC_DP_RX_FEAT_TUNNELS |
-                                  SFC_DP_RX_FEAT_CHECKSUM,
+                                  SFC_DP_RX_FEAT_INTR,
+        .dev_offload_capa       = DEV_RX_OFFLOAD_CHECKSUM |
+                                  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM,
+        .queue_offload_capa     = DEV_RX_OFFLOAD_SCATTER,
         .get_dev_info           = sfc_ef10_rx_get_dev_info,
         .qsize_up_rings         = sfc_ef10_rx_qsize_up_rings,
         .qcreate                = sfc_ef10_rx_qcreate,
@@ -677,5 +809,7 @@ struct sfc_dp_rx sfc_ef10_rx = {
         .supported_ptypes_get   = sfc_ef10_supported_ptypes_get,
         .qdesc_npending         = sfc_ef10_rx_qdesc_npending,
         .qdesc_status           = sfc_ef10_rx_qdesc_status,
+        .intr_enable            = sfc_ef10_rx_intr_enable,
+        .intr_disable           = sfc_ef10_rx_intr_disable,
         .pkt_burst              = sfc_ef10_recv_pkts,
 };
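One detail from earlier in the diff worth calling out: sfc_ef10_rx_qsize_up_rings() now clamps to the hardware minimum supplied via struct sfc_dp_rx_hw_limits and rounds any larger request up to a power of two, which is what keeps the ptr_mask indexing used throughout valid. A standalone version of that rounding (align32pow2 reimplements the documented behaviour of rte_align32pow2; the demo limit of 512 is illustrative):

/* Round a requested Rx descriptor count up to a valid ring size. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same contract as DPDK's rte_align32pow2: round up to a power of two. */
static uint32_t align32pow2(uint32_t x)
{
        x--;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return x + 1;
}

static uint32_t rxq_entries(uint32_t nb_rx_desc, uint32_t rxq_min_entries)
{
        if (nb_rx_desc <= rxq_min_entries)
                return rxq_min_entries;
        return align32pow2(nb_rx_desc);
}

int main(void)
{
        assert(rxq_entries(100, 512) == 512);   /* clamped to the minimum */
        assert(rxq_entries(1000, 512) == 1024); /* rounded up */
        assert(rxq_entries(2048, 512) == 2048); /* already a power of two */
        printf("ok\n");
        return 0;
}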