diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 9c921c0576..f2fc6e70a1 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -56,13 +56,17 @@ struct sfc_ef10_rxq {
 #define SFC_EF10_RXQ_NOT_RUNNING	0x2
 #define SFC_EF10_RXQ_EXCEPTION		0x4
 #define SFC_EF10_RXQ_RSS_HASH		0x8
+#define SFC_EF10_RXQ_FLAG_INTR_EN	0x10
 	unsigned int			ptr_mask;
 	unsigned int			pending;
 	unsigned int			completed;
 	unsigned int			evq_read_ptr;
+	unsigned int			evq_read_ptr_primed;
 	efx_qword_t			*evq_hw_ring;
 	struct sfc_ef10_rx_sw_desc	*sw_ring;
 	uint64_t			rearm_data;
+	struct rte_mbuf			*scatter_pkt;
+	volatile void			*evq_prime;
 	uint16_t			prefix_size;

 	/* Used on refill */
@@ -84,6 +88,13 @@ sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
 	return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
 }

+static void
+sfc_ef10_rx_qprime(struct sfc_ef10_rxq *rxq)
+{
+	sfc_ef10_ev_qprime(rxq->evq_prime, rxq->evq_read_ptr, rxq->ptr_mask);
+	rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
+}
+
 static void
 sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
 {
@@ -192,6 +203,8 @@ sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
 {
 	uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);

+	SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL);
+
 	if (n_rx_pkts != 0) {
 		unsigned int completed = rxq->completed;

@@ -230,11 +243,17 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
 	struct rte_mbuf *m;
 	struct rte_mbuf *m0;
 	const uint8_t *pseudo_hdr;
-	uint16_t pkt_len;
+	uint16_t seg_len;

 	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
 		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
-	SFC_ASSERT(ready > 0);
+
+	if (ready == 0) {
+		/* Rx abort - there were not enough descriptors for the Rx packet */
+		rte_pktmbuf_free(rxq->scatter_pkt);
+		rxq->scatter_pkt = NULL;
+		return rx_pkts;
+	}

 	rxq->pending = pending + ready;

@@ -250,15 +269,43 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
 		return rx_pkts;
 	}

+	/* If scattered packet is in progress */
+	if (rxq->scatter_pkt != NULL) {
+		/* Events for scattered packet frags are not merged */
+		SFC_ASSERT(ready == 1);
+		SFC_ASSERT(rxq->completed == pending);
+
+		/* There is no pseudo-header in scatter segments. */
+		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES);
+
+		rxd = &rxq->sw_ring[pending++ & ptr_mask];
+		m = rxd->mbuf;
+
+		MBUF_RAW_ALLOC_CHECK(m);
+
+		m->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_pktmbuf_data_len(m) = seg_len;
+		rte_pktmbuf_pkt_len(m) = seg_len;
+
+		rxq->scatter_pkt->nb_segs++;
+		rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len;
+		rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m;
+
+		if (~rx_ev.eq_u64[0] &
+		    rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
+			*rx_pkts++ = rxq->scatter_pkt;
+			rxq->scatter_pkt = NULL;
+		}
+		rxq->completed = pending;
+		return rx_pkts;
+	}
+
 	rxd = &rxq->sw_ring[pending++ & ptr_mask];

 	sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);

 	m = rxd->mbuf;

-	*rx_pkts++ = m;
-	rxq->completed = pending;
-
 	RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
 	m->rearm_data[0] = rxq->rearm_data;
@@ -279,16 +326,27 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
 	m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

 	if (ready == 1)
-		pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
+		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
 			rxq->prefix_size;
 	else
-		pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
-	SFC_ASSERT(pkt_len > 0);
-	rte_pktmbuf_data_len(m) = pkt_len;
-	rte_pktmbuf_pkt_len(m) = pkt_len;
+		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+	SFC_ASSERT(seg_len > 0);
+	rte_pktmbuf_data_len(m) = seg_len;
+	rte_pktmbuf_pkt_len(m) = seg_len;

 	SFC_ASSERT(m->next == NULL);

+	if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
+		*rx_pkts++ = m;
+		rxq->completed = pending;
+	} else {
+		/* Events with CONT bit are not merged */
+		SFC_ASSERT(ready == 1);
+		rxq->scatter_pkt = m;
+		rxq->completed = pending;
+		return rx_pkts;
+	}
+
 	/* Remember mbuf to copy offload flags and packet type from */
 	m0 = m;
 	while (pending != rxq->pending) {
@@ -321,10 +379,10 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
 		 */
 		m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

-		pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
-		SFC_ASSERT(pkt_len > 0);
-		rte_pktmbuf_data_len(m) = pkt_len;
-		rte_pktmbuf_pkt_len(m) = pkt_len;
+		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+		SFC_ASSERT(seg_len > 0);
+		rte_pktmbuf_data_len(m) = seg_len;
+		rte_pktmbuf_pkt_len(m) = seg_len;

 		SFC_ASSERT(m->next == NULL);
 	}
@@ -388,6 +446,10 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	/* It is not a problem if we refill in the case of exception */
 	sfc_ef10_rx_qrefill(rxq);

+	if ((rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) &&
+	    rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
+		sfc_ef10_rx_qprime(rxq);
+
 done:
 	return nb_pkts - (rx_pkts_end - rx_pkts);
 }
@@ -452,21 +514,53 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
 static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
 static unsigned int
-sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
 {
+	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+	efx_qword_t rx_ev;
+	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+	unsigned int pending = rxq->pending;
+	unsigned int ready;
+
+	if (unlikely(rxq->flags &
+		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
+		goto done;
+
+	while (sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+		ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
+			 pending) &
+			EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+		pending += ready;
+	}
+
 	/*
-	 * Correct implementation requires EvQ polling and events
-	 * processing (keeping all ready mbufs in prepared).
+	 * The function does not process events, so return event queue read
+	 * pointer to the original position to allow the events that were
+	 * read to be processed later
 	 */
-	return -ENOTSUP;
+	rxq->evq_read_ptr = evq_old_read_ptr;
+
+done:
+	return pending - rxq->completed;
 }

 static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
 static int
-sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
-			 __rte_unused uint16_t offset)
+sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
 {
-	return -ENOTSUP;
+	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+	unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq);
+
+	if (unlikely(offset > rxq->ptr_mask))
+		return -EINVAL;
+
+	if (offset < npending)
+		return RTE_ETH_RX_DESC_DONE;
+
+	if (offset < (rxq->added - rxq->completed))
+		return RTE_ETH_RX_DESC_AVAIL;
+
+	return RTE_ETH_RX_DESC_UNAVAIL;
 }

@@ -486,6 +580,7 @@ sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
 static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
 static int
 sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
+			   struct sfc_dp_rx_hw_limits *limits,
 			   __rte_unused struct rte_mempool *mb_pool,
 			   unsigned int *rxq_entries,
 			   unsigned int *evq_entries,
@@ -495,8 +590,8 @@ sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
 	 * rte_ethdev API guarantees that the number meets min, max and
 	 * alignment requirements.
 	 */
-	if (nb_rx_desc <= EFX_RXQ_MINNDESCS)
-		*rxq_entries = EFX_RXQ_MINNDESCS;
+	if (nb_rx_desc <= limits->rxq_min_entries)
+		*rxq_entries = limits->rxq_min_entries;
 	else
 		*rxq_entries = rte_align32pow2(nb_rx_desc);

@@ -572,6 +667,9 @@ sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
 	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
 			ER_DZ_RX_DESC_UPD_REG_OFST +
 			(info->hw_index << info->vi_window_shift);
+	rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
+			 ER_DZ_EVQ_RPTR_REG_OFST +
+			 (info->evq_hw_index << info->vi_window_shift);

 	*dp_rxqp = &rxq->dp;
 	return 0;
@@ -611,6 +709,9 @@ sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
 	rxq->flags |= SFC_EF10_RXQ_STARTED;
 	rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);

+	if (rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN)
+		sfc_ef10_rx_qprime(rxq);
+
 	return 0;
 }

@@ -649,6 +750,9 @@ sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
 	unsigned int i;
 	struct sfc_ef10_rx_sw_desc *rxd;

+	rte_pktmbuf_free(rxq->scatter_pkt);
+	rxq->scatter_pkt = NULL;
+
 	for (i = rxq->completed; i != rxq->added; ++i) {
 		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
 		rte_mbuf_raw_free(rxd->mbuf);
@@ -660,6 +764,29 @@ sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
 	rxq->flags &= ~SFC_EF10_RXQ_STARTED;
 }

+static sfc_dp_rx_intr_enable_t sfc_ef10_rx_intr_enable;
+static int
+sfc_ef10_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
+{
+	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+	rxq->flags |= SFC_EF10_RXQ_FLAG_INTR_EN;
+	if (rxq->flags & SFC_EF10_RXQ_STARTED)
+		sfc_ef10_rx_qprime(rxq);
+	return 0;
+}
+
+static sfc_dp_rx_intr_disable_t sfc_ef10_rx_intr_disable;
+static int
+sfc_ef10_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
+{
+	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+	/* Cannot disarm, just disable rearm */
+	rxq->flags &= ~SFC_EF10_RXQ_FLAG_INTR_EN;
+	return 0;
+}
+
 struct sfc_dp_rx sfc_ef10_rx = {
 	.dp = {
 		.name		= SFC_KVARG_DATAPATH_EF10,
@@ -667,8 +794,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
-				  SFC_DP_RX_FEAT_TUNNELS |
-				  SFC_DP_RX_FEAT_CHECKSUM,
+				  SFC_DP_RX_FEAT_INTR,
+	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
+				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM,
+	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_rx_qcreate,
@@ -680,5 +809,7 @@ struct sfc_dp_rx sfc_ef10_rx = {
 	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
 	.qdesc_npending		= sfc_ef10_rx_qdesc_npending,
 	.qdesc_status		= sfc_ef10_rx_qdesc_status,
+	.intr_enable		= sfc_ef10_rx_intr_enable,
+	.intr_disable		= sfc_ef10_rx_intr_disable,
 	.pkt_burst		= sfc_ef10_recv_pkts,
 };
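
The intr_enable/intr_disable callbacks added above plug the EF10 datapath into the generic ethdev Rx interrupt machinery. The following is a hedged application-side sketch, assuming the port was configured with Rx queue interrupts requested (intr_conf.rxq = 1); the helper name, the port/queue ids and the 32-packet burst size are illustrative assumptions, not part of the patch.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

/* Block until (port_id, queue_id) has traffic, then poll the queue empty. */
static int
rx_wait_and_drain(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;
	int rc;

	/* Map the queue interrupt into the calling thread's epoll set */
	rc = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				       RTE_INTR_EVENT_ADD, NULL);
	if (rc != 0)
		return rc;

	/*
	 * Arm the interrupt; on EF10 this primes the event queue
	 * (sfc_ef10_rx_qprime) so the next event raises an interrupt.
	 */
	rc = rte_eth_dev_rx_intr_enable(port_id, queue_id);
	if (rc != 0)
		return rc;

	/* Sleep until the primed event queue fires */
	(void)rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);

	/*
	 * EF10 cannot disarm a primed EvQ; per the patch, this only clears
	 * the flag that makes sfc_ef10_recv_pkts() re-prime after each poll.
	 */
	(void)rte_eth_dev_rx_intr_disable(port_id, queue_id);

	do {
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts,
					 RTE_DIM(pkts));
		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]); /* drop, for illustration */
	} while (nb_rx != 0);

	return 0;
}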
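Likewise, the now-implemented qdesc_npending/qdesc_status callbacks surface through rte_eth_rx_descriptor_status(). A minimal sketch of estimating how many received-but-unpolled packets sit in a queue; it relies on the driver-specific fact from the patch that offsets below the pending count map to DESC_DONE, and the helper name is a hypothetical illustration.

#include <rte_ethdev.h>

/* Count ring slots whose packets have been received but not yet polled */
static uint16_t
rxq_done_count(uint16_t port_id, uint16_t queue_id, uint16_t ring_size)
{
	uint16_t off;

	for (off = 0; off < ring_size; off++) {
		/* For sfc, DONE offsets are contiguous starting at zero */
		if (rte_eth_rx_descriptor_status(port_id, queue_id, off) !=
		    RTE_ETH_RX_DESC_DONE)
			break;
	}

	return off;
}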