/** Disable Rx interrupts */
typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);
+/** Get number of pushed Rx buffers */
+typedef unsigned int (sfc_dp_rx_get_pushed_t)(struct sfc_dp_rxq *dp_rxq);
+
/** Receive datapath definition */
struct sfc_dp_rx {
	struct sfc_dp dp;
	sfc_dp_rx_qdesc_status_t *qdesc_status;
	sfc_dp_rx_intr_enable_t *intr_enable;
	sfc_dp_rx_intr_disable_t *intr_disable;
+	sfc_dp_rx_get_pushed_t *get_pushed;
	eth_rx_burst_t pkt_burst;
};
return 0;
}
+static sfc_dp_rx_get_pushed_t sfc_ef100_rx_get_pushed;
+static unsigned int
+sfc_ef100_rx_get_pushed(struct sfc_dp_rxq *dp_rxq)
+{
+	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
+
+	/*
+	 * The datapath keeps track only of added descriptors, since
+	 * the number of pushed descriptors always equals the number
+	 * of added descriptors due to enforced alignment.
+	 */
+	return rxq->added;
+}
+
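For contrast, here is a hedged sketch (illustrative only, not part of this patch or of the sfc driver) of a datapath where descriptors are written to the ring first and only become visible to the NIC at a doorbell boundary; in such a design the added and pushed counts can differ, and get_pushed must report the pushed one. The names example_rxq and example_rx_get_pushed are hypothetical:

/*
 * Illustrative only: a datapath where 'added' can run ahead of 'pushed'
 * until the doorbell is rung, so the two counters are tracked separately
 * and the get_pushed callback reports the pushed one.
 */
struct example_rxq {
	unsigned int added;	/* descriptors written to the ring */
	unsigned int pushed;	/* descriptors already visible to the NIC */
};

static unsigned int
example_rx_get_pushed(struct example_rxq *rxq)
{
	return rxq->pushed;
}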
struct sfc_dp_rx sfc_ef100_rx = {
	.dp = {
		.name = SFC_KVARG_DATAPATH_EF100,
	},
	.qdesc_status = sfc_ef100_rx_qdesc_status,
	.intr_enable = sfc_ef100_rx_intr_enable,
	.intr_disable = sfc_ef100_rx_intr_disable,
+	.get_pushed = sfc_ef100_rx_get_pushed,
	.pkt_burst = sfc_ef100_recv_pkts,
};
rxq_info->state &= ~SFC_RXQ_FLUSHING;
}
+/* Returns a running counter of pushed Rx buffers; it is not bounded by the ring size */
+unsigned int
+sfc_rx_get_pushed(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq)
+{
+	SFC_ASSERT(sa->priv.dp_rx->get_pushed != NULL);
+
+	return sa->priv.dp_rx->get_pushed(dp_rxq);
+}
+
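Since the value is a free-running counter, a caller is expected to diff it against its own completion counter; unsigned arithmetic keeps the subtraction correct across wraparound. A minimal caller-side sketch follows (not part of the patch; example_rx_in_flight and the caller-maintained 'completed' counter are assumptions for illustration):

/*
 * Hypothetical helper: number of buffers pushed to the NIC but not yet
 * completed. Both values are free-running unsigned counters, so the
 * subtraction stays correct even after 32-bit wraparound.
 */
static unsigned int
example_rx_in_flight(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq,
		     unsigned int completed)
{
	unsigned int pushed = sfc_rx_get_pushed(sa, dp_rxq);

	return pushed - completed;
}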
static int
sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
{
void sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info);
void sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info);
+unsigned int sfc_rx_get_pushed(struct sfc_adapter *sa,
+			       struct sfc_dp_rxq *dp_rxq);
+
int sfc_rx_hash_init(struct sfc_adapter *sa);
void sfc_rx_hash_fini(struct sfc_adapter *sa);
int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,