* Added support for RSS RETA and hash configuration get API in a secondary
process.
* Added support for Rx packet types list in a secondary process.
+ * Added Tx prepare to perform Tx offload checks (see the usage sketch below).
* **Updated Mellanox drivers.**
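The Tx prepare bullet above corresponds to the generic ethdev tx_pkt_prepare stage. A minimal, hypothetical application-side sketch of how these offload checks are typically invoked before the burst call is given below; the send_burst() helper name, port_id, queue_id and the pkts array are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

/*
 * Illustrative helper (not part of the patch): run the driver's
 * tx_pkt_prepare callback on a batch of mbufs and transmit only the
 * packets that passed the offload checks.
 */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t nb_prep;

	/*
	 * rte_eth_tx_prepare() returns the number of leading packets that
	 * passed the checks and sets rte_errno for the first failing one.
	 */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
	if (nb_prep != nb)
		printf("Tx prepare stopped at packet %u: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}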
#include <rte_ethdev_driver.h>
#include "sfc_dp.h"
+#include "sfc_debug.h"
#ifdef __cplusplus
extern "C" {
sfc_dp_tx_qtx_ev_t *qtx_ev;
sfc_dp_tx_qreap_t *qreap;
sfc_dp_tx_qdesc_status_t *qdesc_status;
+ eth_tx_prep_t pkt_prepare;
eth_tx_burst_t pkt_burst;
};
/** Get Tx datapath ops by the datapath TxQ handle */
const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+/** Validate Tx offloads of a packet (checks are compiled in debug builds only) */
+static inline int
+sfc_dp_tx_prepare_pkt(struct rte_mbuf *m)
+{
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+ int ret;
+
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ /*
+ * A negative error code is returned by rte_validate_tx_offload(),
+ * but positive error codes are used inside the net/sfc PMD.
+ */
+ SFC_ASSERT(ret < 0);
+ return -ret;
+ }
+#else
+ RTE_SET_USED(m);
+#endif
+
+ return 0;
+}
+
extern struct sfc_dp_tx sfc_efx_tx;
extern struct sfc_dp_tx sfc_ef10_tx;
extern struct sfc_dp_tx sfc_ef10_simple_tx;
sa->priv.dp_tx = dp_tx;
dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->dev_ops = &sfc_eth_dev_ops;
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
dev->dev_ops = NULL;
+ dev->tx_pkt_prepare = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
dev->process_private = sap;
dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
free(dev->process_private);
dev->process_private = NULL;
dev->dev_ops = NULL;
+ dev->tx_pkt_prepare = NULL;
dev->tx_pkt_burst = NULL;
dev->rx_pkt_burst = NULL;
}
uint8_t *tsoh;
const struct tcp_hdr *th;
efsys_dma_addr_t header_paddr;
- uint16_t packet_id;
+ uint16_t packet_id = 0;
uint32_t sent_seq;
struct rte_mbuf *m = *in_seg;
size_t nh_off = m->l2_len; /* IP header offset */
tsoh = rte_pktmbuf_mtod(m, uint8_t *);
}
- /* Handle IP header */
+ /*
+ * Handle IP header. Tx prepare has debug-only checks that offload flags
+ * are filled in correctly in the TSO mbuf. Use zero IPID if there is no
+ * IPv4 flag. If the packet is in fact IPv4, the HW will simply start
+ * from zero IPID.
+ */
if (m->ol_flags & PKT_TX_IPV4) {
const struct ipv4_hdr *iphe4;
iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
packet_id = rte_be_to_cpu_16(packet_id);
- } else if (m->ol_flags & PKT_TX_IPV6) {
- packet_id = 0;
- } else {
- return EINVAL;
}
/* Handle TCP header */
return 1;
}
+/* EFX (libefx-based) Tx datapath prepare callback: check Tx offloads per mbuf */
+static uint16_t
+sfc_efx_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ int ret;
+
+ ret = sfc_dp_tx_prepare_pkt(tx_pkts[i]);
+ if (unlikely(ret != 0)) {
+ rte_errno = ret;
+ break;
+ }
+ }
+
+ return i;
+}
+
static uint16_t
sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
.qstop = sfc_efx_tx_qstop,
.qreap = sfc_efx_tx_qreap,
.qdesc_status = sfc_efx_tx_qdesc_status,
+ .pkt_prepare = sfc_efx_prepare_pkts,
.pkt_burst = sfc_efx_xmit_pkts,
};