L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
+Rx descriptor status = Y
Basic stats = Y
Extended stats = Y
FW version = Y
/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
+/** Check Rx descriptor status */
+typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
+ uint16_t offset);
+
/** Receive datapath definition */
struct sfc_dp_rx {
struct sfc_dp dp;
sfc_dp_rx_qpurge_t *qpurge;
sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
sfc_dp_rx_qdesc_npending_t *qdesc_npending;
+ sfc_dp_rx_qdesc_status_t *qdesc_status;
eth_rx_burst_t pkt_burst;
};
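
Every datapath is now expected to populate qdesc_status. An alternative design would leave the pointer NULL for unsupported datapaths and check it at dispatch time; a minimal sketch of that hypothetical variant follows (sfc_rx_qdesc_status_checked is an invented name, and it assumes struct sfc_adapter exposes the bound datapath as dp_rx, as the dispatch code later in this patch does):

/* Hypothetical NULL-tolerant dispatch (invented; not in the patch):
 * datapaths lacking support could leave .qdesc_status unset instead
 * of registering an explicit -ENOTSUP stub as EF10 does below. */
static int
sfc_rx_qdesc_status_checked(struct sfc_adapter *sa,
			    struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	if (sa->dp_rx->qdesc_status == NULL)
		return -ENOTSUP;

	return sa->dp_rx->qdesc_status(dp_rxq, offset);
}

The patch instead registers an explicit -ENOTSUP stub for EF10, keeping the op table fully populated and the fast-path dispatch free of a NULL check.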
return -ENOTSUP;
}

+static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
+static int
+sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
+ __rte_unused uint16_t offset)
+{
+ return -ENOTSUP;
+}
+
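
A small idiom worth noting: declaring sfc_ef10_rx_qdesc_status with the sfc_dp_rx_qdesc_status_t typedef before defining it lets the compiler check that the definition matches the callback prototype expected by struct sfc_dp_rx; the libefx datapath below repeats the pattern.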
static uint64_t
sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
.qpurge = sfc_ef10_rx_qpurge,
.supported_ptypes_get = sfc_ef10_supported_ptypes_get,
.qdesc_npending = sfc_ef10_rx_qdesc_npending,
+ .qdesc_status = sfc_ef10_rx_qdesc_status,
.pkt_burst = sfc_ef10_recv_pkts,
};
return sfc_rx_qdesc_done(dp_rxq, offset);
}

+static int
+sfc_rx_descriptor_status(void *queue, uint16_t offset)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset);
+}
+
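
Once sfc_rx_descriptor_status is wired into the ethdev ops (see the .rx_descriptor_status entry below), applications reach it through the generic rte_eth_rx_descriptor_status() call. A minimal usage sketch, assuming a started port and queue and a DPDK version with 16-bit port IDs; count_done_rx_descs is an invented helper name:

#include <rte_ethdev.h>

/* Hypothetical helper: count how many descriptors at the head of the
 * given Rx queue have already been filled by the HW. Statuses are
 * ordered (DONE entries occupy the lowest offsets), so a linear scan
 * may stop at the first status other than RTE_ETH_RX_DESC_DONE. */
static inline uint16_t
count_done_rx_descs(uint16_t port_id, uint16_t queue_id, uint16_t max)
{
	uint16_t off;

	for (off = 0; off < max; off++) {
		if (rte_eth_rx_descriptor_status(port_id, queue_id, off) !=
		    RTE_ETH_RX_DESC_DONE)
			break;
	}

	return off;
}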
static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
.rx_queue_release = sfc_rx_queue_release,
.rx_queue_count = sfc_rx_queue_count,
.rx_descriptor_done = sfc_rx_descriptor_done,
+ .rx_descriptor_status = sfc_rx_descriptor_status,
.tx_queue_setup = sfc_tx_queue_setup,
.tx_queue_release = sfc_tx_queue_release,
.flow_ctrl_get = sfc_flow_ctrl_get,
return rxq->pending - rxq->completed;
}

+static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
+static int
+sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if (unlikely(offset > rxq->ptr_mask))
+ return -EINVAL;
+
+ /*
+ * Poll the EvQ to bring the 'rxq->pending' figure up to date.
+ * Polling requires the queue to be running, but that check is
+ * omitted: the API design makes it the caller's duty to satisfy
+ * all such preconditions (hence only the assertion below)
+ */
+ SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
+ SFC_EFX_RXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(rxq->evq);
+
+ /*
+ * The ring contains a handful of reserved entries, but there is
+ * no explicit check that the offset points at a reserved entry:
+ * the two checks below rely on figures which already take the
+ * HW limits into account, so a reserved entry fails both checks
+ * and the UNAVAIL code is returned for it
+ */
+
+ if (offset < (rxq->pending - rxq->completed))
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed))
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
+
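
To make the counter arithmetic in sfc_efx_rx_qdesc_status() concrete, here is a self-contained sketch that replays the three-way classification for a sample ring state; all names and values are invented for illustration:

#include <stdio.h>

/* Stand-ins for RTE_ETH_RX_DESC_{DONE,AVAIL,UNAVAIL}; values invented. */
enum desc_status { DESC_DONE, DESC_AVAIL, DESC_UNAVAIL };

/* Mirror of the three-way check above; offset is relative to the
 * 'completed' (read) pointer of the ring. */
static enum desc_status
classify(unsigned int completed, unsigned int pending,
	 unsigned int added, unsigned int offset)
{
	if (offset < pending - completed)
		return DESC_DONE;	/* filled by HW, not yet reaped */
	if (offset < added - completed)
		return DESC_AVAIL;	/* posted to HW, still empty */
	return DESC_UNAVAIL;		/* not posted (incl. reserved) */
}

int
main(void)
{
	/* completed = 10, pending = 14, added = 20:
	 * offsets 0..3 -> DONE, 4..9 -> AVAIL, 10.. -> UNAVAIL */
	unsigned int off;

	for (off = 0; off < 12; off++)
		printf("offset %2u -> %d\n", off, classify(10, 14, 20, off));

	return 0;
}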
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
.qpurge = sfc_efx_rx_qpurge,
.supported_ptypes_get = sfc_efx_supported_ptypes_get,
.qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .qdesc_status = sfc_efx_rx_qdesc_status,
.pkt_burst = sfc_efx_recv_pkts,
};