extern "C" {
#endif
+/**
+ * Prime event queue to allow processed events to be reused.
+ *
+ * @param evq_prime Global address of the prime register
+ * @param evq_hw_index Event queue index
+ * @param evq_read_ptr Masked event queue read pointer
+ */
+static inline void
+sfc_ef100_evq_prime(volatile void *evq_prime, unsigned int evq_hw_index,
+ unsigned int evq_read_ptr)
+{
+ efx_dword_t dword;
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_GZ_EVQ_ID, evq_hw_index,
+ ERF_GZ_IDX, evq_read_ptr);
+
+ /*
+ * EvQ prime on EF100 allows HW to reuse event descriptors, so
+ * all reads of event descriptors must be complete before the
+ * prime write. There is an implicit data dependency here: we
+ * move past an event only after we have read it and processed
+ * it. Hence, no extra barriers are required.
+ */
+ rte_write32_relaxed(dword.ed_u32[0], evq_prime);
+}
+
static inline bool
sfc_ef100_ev_present(const efx_qword_t *ev, bool phase_bit)
{
#define SFC_EF100_RXQ_EXCEPTION 0x4
#define SFC_EF100_RXQ_RSS_HASH 0x10
#define SFC_EF100_RXQ_USER_MARK 0x20
+#define SFC_EF100_RXQ_FLAG_INTR_EN 0x40
unsigned int ptr_mask;
unsigned int evq_phase_bit_shift;
unsigned int ready_pkts;
unsigned int completed;
unsigned int evq_read_ptr;
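+ /* EvQ read pointer value at the moment of the last prime */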
+ unsigned int evq_read_ptr_primed;
volatile efx_qword_t *evq_hw_ring;
struct sfc_ef100_rx_sw_desc *sw_ring;
uint64_t rearm_data;
uint16_t buf_size;
uint16_t prefix_size;
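+ /* EvQ HW index and prime register address used for interrupt rearm */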
+ unsigned int evq_hw_index;
+ volatile void *evq_prime;
+
/* Used on refill */
unsigned int added;
unsigned int max_fill_level;
return container_of(dp_rxq, struct sfc_ef100_rxq, dp);
}
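+/*
+ * Prime the EvQ with the current (masked) read pointer and remember
+ * the primed position so that the datapath can skip a redundant
+ * prime write when the read pointer has not advanced.
+ */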
+static void
+sfc_ef100_rx_qprime(struct sfc_ef100_rxq *rxq)
+{
+ sfc_ef100_evq_prime(rxq->evq_prime, rxq->evq_hw_index,
+ rxq->evq_read_ptr & rxq->ptr_mask);
+ rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
+}
+
static inline void
sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added)
{
/* It is not a problem if we refill in the case of exception */
sfc_ef100_rx_qrefill(rxq);
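+ /*
+ * Rearm the interrupt (prime the EvQ) only if interrupts are
+ * enabled and the read pointer has advanced since the last prime.
+ */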
+ if ((rxq->flags & SFC_EF100_RXQ_FLAG_INTR_EN) &&
+ rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
+ sfc_ef100_rx_qprime(rxq);
+
done:
return nb_pkts - (rx_pkts_end - rx_pkts);
}
ER_GZ_RX_RING_DOORBELL_OFST +
(info->hw_index << info->vi_window_shift);
+ rxq->evq_hw_index = info->evq_hw_index;
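+ /* The EvQ prime register lives in the function control window */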
+ rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
+ info->fcw_offset +
+ ER_GZ_EVQ_INT_PRIME_OFST;
+
sfc_ef100_rx_debug(rxq, "RxQ doorbell is %p", rxq->doorbell);
*dp_rxqp = &rxq->dp;
rxq->flags |= SFC_EF100_RXQ_STARTED;
rxq->flags &= ~(SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION);
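+ /* Prime the EvQ at start if Rx interrupts are already enabled */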
+ if (rxq->flags & SFC_EF100_RXQ_FLAG_INTR_EN)
+ sfc_ef100_rx_qprime(rxq);
+
return 0;
}
rxq->flags &= ~SFC_EF100_RXQ_STARTED;
}
+static sfc_dp_rx_intr_enable_t sfc_ef100_rx_intr_enable;
+static int
+sfc_ef100_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF100_RXQ_FLAG_INTR_EN;
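+ /* A queue that is not started yet will be primed on queue start */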
+ if (rxq->flags & SFC_EF100_RXQ_STARTED)
+ sfc_ef100_rx_qprime(rxq);
+ return 0;
+}
+
+static sfc_dp_rx_intr_disable_t sfc_ef100_rx_intr_disable;
+static int
+sfc_ef100_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
+
+ /* Cannot disarm, just disable rearm */
+ rxq->flags &= ~SFC_EF100_RXQ_FLAG_INTR_EN;
+ return 0;
+}
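+/*
+ * Usage sketch: with SFC_DP_RX_FEAT_INTR advertised, an application
+ * can wait for Rx events through the standard ethdev interrupt API
+ * (port_id, queue_id, ev, timeout_ms and pkts are placeholders):
+ *
+ *   rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
+ *                             RTE_EPOLL_PER_THREAD,
+ *                             RTE_INTR_EVENT_ADD, NULL);
+ *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, timeout_ms);
+ *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
+ *   nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
+ */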
+
struct sfc_dp_rx sfc_ef100_rx = {
.dp = {
.name = SFC_KVARG_DATAPATH_EF100,
.type = SFC_DP_RX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF100,
},
- .features = SFC_DP_RX_FEAT_MULTI_PROCESS,
+ .features = SFC_DP_RX_FEAT_MULTI_PROCESS |
+ SFC_DP_RX_FEAT_INTR,
.dev_offload_capa = 0,
.queue_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
.supported_ptypes_get = sfc_ef100_supported_ptypes_get,
.qdesc_npending = sfc_ef100_rx_qdesc_npending,
.qdesc_status = sfc_ef100_rx_qdesc_status,
+ .intr_enable = sfc_ef100_rx_intr_enable,
+ .intr_disable = sfc_ef100_rx_intr_disable,
.pkt_burst = sfc_ef100_recv_pkts,
};