sa->priv.shared->tunnel_encaps =
encp->enc_tunnel_encapsulations_supported;
- if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO) {
+ if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
if (!sa->tso)
sfc_info(sa, "TSO support isn't available on this adapter");
}
- if (sa->tso && sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO_ENCAP) {
+ if (sa->tso &&
+ (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
+ (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
if (!sa->tso_encap)
sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
struct sfc_dp dp;
unsigned int features;
-#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1
-#define SFC_DP_TX_FEAT_TSO 0x2
-#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
-#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
-#define SFC_DP_TX_FEAT_MULTI_POOL 0x10
-#define SFC_DP_TX_FEAT_REFCNT 0x20
-#define SFC_DP_TX_FEAT_TSO_ENCAP 0x40
+#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x1
+ /**
+ * Tx offload capabilities supported by the datapath at device
+ * level only, provided that HW/FW supports them.
+ */
+ uint64_t dev_offload_capa;
+ /**
+ * Tx offload capabilities supported by the datapath per queue,
+ * provided that HW/FW supports them.
+ */
+ uint64_t queue_offload_capa;
sfc_dp_tx_get_dev_info_t *get_dev_info;
sfc_dp_tx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_tx_qcreate_t *qcreate;
/** Get Tx datapath ops by the datapath TxQ handle */
const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
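+/** Return all Tx offloads the datapath advertises, at device and per-queue level */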
+static inline uint64_t
+sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
+{
+ return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
+}
+
static inline int
sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
uint32_t tso_tcp_header_offset_limit,
.type = SFC_DP_TX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = SFC_DP_TX_FEAT_TSO |
- SFC_DP_TX_FEAT_TSO_ENCAP |
- SFC_DP_TX_FEAT_MULTI_SEG |
- SFC_DP_TX_FEAT_MULTI_POOL |
- SFC_DP_TX_FEAT_REFCNT |
- SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .features = SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .dev_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS,
+ .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
.get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.type = SFC_DP_TX,
},
.features = SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .dev_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+ .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
.get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
*/
#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
-uint64_t
-sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
+static uint64_t
+sfc_tx_get_offload_mask(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
- uint64_t caps = 0;
+ uint64_t no_caps = 0;
- if ((sa->priv.dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
- encp->enc_hw_tx_insert_vlan_enabled)
- caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ if (!encp->enc_hw_tx_insert_vlan_enabled)
+ no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
- if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
- caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ if (!encp->enc_tunnel_encapsulations_supported)
+ no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
- if ((~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
- (~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
- caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ if (!sa->tso)
+ no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
- return caps;
+ if (!sa->tso_encap)
+ no_caps |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+
+ return ~no_caps;
}
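/*
 * Worked example (illustration only, not part of the driver sources):
 * if FW-assisted TSO is unavailable, sa->tso stays false, so the mask
 * clears DEV_TX_OFFLOAD_TCP_TSO and a datapath that advertises it in
 * queue_offload_capa no longer reports it:
 *
 *   no_caps  = DEV_TX_OFFLOAD_TCP_TSO;       // sa->tso == 0
 *   mask     = ~no_caps;                     // all bits except TCP_TSO
 *   reported = queue_offload_capa & mask;    // TCP_TSO bit dropped
 */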
uint64_t
-sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
{
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
- uint64_t caps = 0;
-
- caps |= DEV_TX_OFFLOAD_IPV4_CKSUM;
- caps |= DEV_TX_OFFLOAD_UDP_CKSUM;
- caps |= DEV_TX_OFFLOAD_TCP_CKSUM;
-
- if (encp->enc_tunnel_encapsulations_supported)
- caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
- if (sa->tso)
- caps |= DEV_TX_OFFLOAD_TCP_TSO;
-
- if (sa->tso_encap)
- caps |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ return sa->priv.dp_tx->dev_offload_capa & sfc_tx_get_offload_mask(sa);
+}
- return caps;
+uint64_t
+sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+ return sa->priv.dp_tx->queue_offload_capa & sfc_tx_get_offload_mask(sa);
}
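/*
 * Minimal sketch of the expected consumer (an assumption based on the usual
 * ethdev convention that device-level capabilities are a superset of the
 * per-queue ones); the actual reporting lives in the driver's dev_infos_get
 * callback:
 *
 *   dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
 *   dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
 *                               dev_info->tx_queue_offload_capa;
 */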
static int
.type = SFC_DP_TX,
.hw_fw_caps = 0,
},
- .features = SFC_DP_TX_FEAT_VLAN_INSERT |
- SFC_DP_TX_FEAT_TSO |
- SFC_DP_TX_FEAT_MULTI_POOL |
- SFC_DP_TX_FEAT_REFCNT |
- SFC_DP_TX_FEAT_MULTI_SEG,
+ .features = 0,
+ .dev_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_MULTI_SEGS,
+ .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO,
.qsize_up_rings = sfc_efx_tx_qsize_up_rings,
.qcreate = sfc_efx_tx_qcreate,
.qdestroy = sfc_efx_tx_qdestroy,