struct sfc_dp_queue dpq;
};
/** Datapath receive queue descriptor number limitations */
struct sfc_dp_rx_hw_limits {
	/** Maximum number of Rx queue descriptors supported by HW */
	unsigned int rxq_max_entries;
	/** Minimum number of Rx queue descriptors supported by HW */
	unsigned int rxq_min_entries;
	/** Maximum number of event queue descriptors supported by HW */
	unsigned int evq_max_entries;
	/** Minimum number of event queue descriptors supported by HW */
	unsigned int evq_min_entries;
};
+
/**
* Datapath receive queue creation information.
*
/** DMA-mapped Rx descriptors ring */
void *rxq_hw_ring;
+ /** Event queue index in hardware */
+ unsigned int evq_hw_index;
/** Associated event queue size */
unsigned int evq_entries;
/** Hardware event ring */
* @return 0 or positive errno.
*/
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
+ struct sfc_dp_rx_hw_limits *limits,
struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
/** Check Rx descriptor status */
typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
uint16_t offset);
/**
 * Enable Rx interrupts for the datapath receive queue.
 *
 * @return presumably 0 or a positive errno, matching the convention stated
 *         for the other datapath callbacks above — confirm in implementations.
 */
typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq);

/**
 * Disable Rx interrupts for the datapath receive queue.
 *
 * @return presumably 0 or a positive errno, matching the convention stated
 *         for the other datapath callbacks above — confirm in implementations.
 */
typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);
/** Receive datapath definition */
struct sfc_dp_rx {
	/** Generic datapath description (base class member) */
	struct sfc_dp dp;

	/** Bitmask of SFC_DP_RX_FEAT_* features provided by the datapath */
	unsigned int features;
/** Datapath may be used by secondary processes as well */
#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x1
/** Datapath can signal presence of a flow rule hit (flag) in mbufs */
#define SFC_DP_RX_FEAT_FLOW_FLAG 0x2
/** Datapath can deliver a flow rule mark value in mbufs */
#define SFC_DP_RX_FEAT_FLOW_MARK 0x4
/** Datapath supports Rx interrupts (intr_enable/intr_disable callbacks) */
#define SFC_DP_RX_FEAT_INTR 0x8
	/**
	 * Rx offload capabilities supported by the datapath at device
	 * level only, provided HW/FW supports them.
	 */
	uint64_t dev_offload_capa;
	/**
	 * Rx offload capabilities supported by the datapath per queue,
	 * provided HW/FW supports them.
	 */
	uint64_t queue_offload_capa;
	/** Fill in datapath-specific device info */
	sfc_dp_rx_get_dev_info_t *get_dev_info;
	/** Check whether a mempool ops variant is supported by the datapath */
	sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
	/** Round up RxQ/EvQ ring sizes for the requested descriptor count */
	sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
	/** Get packet types the datapath can classify */
	sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
	/** Get number of pending Rx descriptors */
	sfc_dp_rx_qdesc_npending_t *qdesc_npending;
	/** Check Rx descriptor status at a given offset */
	sfc_dp_rx_qdesc_status_t *qdesc_status;
	/** Enable Rx interrupts (only if SFC_DP_RX_FEAT_INTR is set) */
	sfc_dp_rx_intr_enable_t *intr_enable;
	/** Disable Rx interrupts (only if SFC_DP_RX_FEAT_INTR is set) */
	sfc_dp_rx_intr_disable_t *intr_disable;
	/** Burst receive entry point (ethdev rx_pkt_burst) */
	eth_rx_burst_t pkt_burst;
	/*
	 * NOTE(review): whether individual callbacks may be left NULL is not
	 * visible in this header — confirm against the callers before relying
	 * on any of them being mandatory.
	 */
};
return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
}
+static inline uint64_t
+sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
+{
+ return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
+}
+
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);

/*
 * Rx datapath implementations defined elsewhere in the driver.
 * Names suggest: regular (efx), EF10 native, and EF10 "essb"
 * (presumably equal stride super-buffer) variants — confirm in
 * the corresponding source files.
 */
extern struct sfc_dp_rx sfc_efx_rx;
extern struct sfc_dp_rx sfc_ef10_rx;
extern struct sfc_dp_rx sfc_ef10_essb_rx;