struct sfc_dp dp;
unsigned int features;
-#define SFC_DP_RX_FEAT_SCATTER 0x1
-#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
-#define SFC_DP_RX_FEAT_TUNNELS 0x4
-#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8
-#define SFC_DP_RX_FEAT_FLOW_MARK 0x10
-#define SFC_DP_RX_FEAT_CHECKSUM 0x20
+#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x1
+#define SFC_DP_RX_FEAT_FLOW_FLAG 0x2
+#define SFC_DP_RX_FEAT_FLOW_MARK 0x4
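+/*
+ * Checksum, scatter and tunnel support are not datapath feature
+ * flags: they are advertised via the offload capability fields below.
+ */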
+ /**
+ * Rx offload capabilities supported by the datapath at device
+ * level only, provided that the HW/FW supports them.
+ */
+ uint64_t dev_offload_capa;
+ /**
+ * Rx offload capabilities supported by the datapath on a
+ * per-queue basis, provided that the HW/FW supports them.
+ */
+ uint64_t queue_offload_capa;
sfc_dp_rx_get_dev_info_t *get_dev_info;
sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
}
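+/**
+ * Rx offload capabilities advertised by the datapath, both
+ * device-level and per-queue ones merged together.
+ */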
+static inline uint64_t
+sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
+{
+ return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
+}
+
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
},
.features = SFC_DP_RX_FEAT_FLOW_FLAG |
- SFC_DP_RX_FEAT_FLOW_MARK |
- SFC_DP_RX_FEAT_CHECKSUM,
+ SFC_DP_RX_FEAT_FLOW_MARK,
+ .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM,
+ .queue_offload_capa = 0,
.get_dev_info = sfc_ef10_essb_rx_get_dev_info,
.pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
.qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,
.type = SFC_DP_RX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = SFC_DP_RX_FEAT_SCATTER |
- SFC_DP_RX_FEAT_MULTI_PROCESS |
- SFC_DP_RX_FEAT_TUNNELS |
- SFC_DP_RX_FEAT_CHECKSUM,
+ .features = SFC_DP_RX_FEAT_MULTI_PROCESS,
+ .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM,
+ .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.get_dev_info = sfc_ef10_rx_get_dev_info,
.qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
.qcreate = sfc_ef10_rx_qcreate,
.type = SFC_DP_RX,
.hw_fw_caps = 0,
},
- .features = SFC_DP_RX_FEAT_SCATTER |
- SFC_DP_RX_FEAT_CHECKSUM,
+ .features = 0,
+ .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM,
+ .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
sfc_ev_qstop(rxq->evq);
}
-uint64_t
-sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
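+/*
+ * Get a mask of usable Rx offloads: capabilities advertised by the
+ * datapath are filtered through it to drop offloads which lack the
+ * required HW/FW support on this adapter.
+ */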
+static uint64_t
+sfc_rx_get_offload_mask(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
- uint64_t caps = 0;
+ uint64_t no_caps = 0;
- caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
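+ /* Outer IPv4 checksum offload requires HW tunnel encapsulation support */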
+ if (encp->enc_tunnel_encapsulations_supported == 0)
+ no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
- if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
- caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
- caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
- caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
- }
+ return ~no_caps;
+}
- if (encp->enc_tunnel_encapsulations_supported &&
- (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
- caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+uint64_t
+sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
+{
+ uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
+
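+ /* Rx jumbo frames are supported regardless of the Rx datapath */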
+ caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- return caps;
+ return caps & sfc_rx_get_offload_mask(sa);
}
uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
- uint64_t caps = 0;
-
- if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
- caps |= DEV_RX_OFFLOAD_SCATTER;
-
- return caps;
+ return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
}
static int
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
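+ /*
+ * Inner frame classification is requested if tunnels are
+ * supported and the Rx datapath is able to use outer IPv4
+ * checksum offload.
+ */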
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
- (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,