/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_H_
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int irq:1; /* Whether IRQ is enabled. */
+ uint16_t idx; /* Queue index. */
};
/* Indirection table. */
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set Tx offloads for tunneled packets are supported. */
+ uint16_t swp_en:1; /* Whether SW parser is enabled. */
uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
struct mlx5_txq_ibv {
LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
};
struct mlx5_txq_data txq; /* Data path structure. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
volatile void *bf_reg_orig; /* Blueflame register from verbs. */
+ uint16_t idx; /* Queue index. */
};
/* mlx5_rxq.c */
/* mlx5_rxtx.c */
extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
void mlx5_set_ptype_table(void);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
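The three mlx5_set_*_table() initializers declared above fill static lookup tables that the data path indexes with bits pulled out of each packet's ol_flags. As a hedged sketch of the intended call order, a one-time init helper could look like the following; the helper name mlx5_tables_init and its placement are assumptions for illustration, not part of this patch:

/* Hypothetical one-shot initializer: populate the ptype, checksum and
 * SW parser lookup tables before any Rx/Tx burst function runs, e.g.
 * from the PMD probe path. */
static void
mlx5_tables_init(void)
{
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
}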
uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE error %u (0x%02x)"
- " syndrome 0x%02x",
- op_code, op_code, syndrome);
+ DRV_LOG(ERR,
+ "unexpected CQE error %u (0x%02x) syndrome"
+ " 0x%02x",
+ op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
sizeof(*err_cqe));
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE opcode %u (0x%02x)",
- op_code, op_code);
+ DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
rte_hexdump(stderr, "MLX5 CQE:",
(const void *)((uintptr_t)cqe),
sizeof(*cqe));
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected error CQE, TX stopped");
+ DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
rte_hexdump(stderr, "MLX5 TXQ:",
(const void *)((uintptr_t)txq->wqes),
((1 << txq->wqe_n) *
} else {
struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
- WARN("Failed to register mempool 0x%p(%s)",
- (void *)mp, mp->name);
+ DRV_LOG(WARNING, "failed to register mempool 0x%p(%s)",
+ (void *)mp, mp->name);
}
return (uint32_t)-1;
}
}
/**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP.
*
* @param txq
* Pointer to the Tx queue.
* @param buf
* Pointer to the mbuf.
+ * @param tso
+ * TSO offloads enabled.
+ * @param vlan
+ * VLAN offloads enabled.
+ * @param offsets
+ * Pointer to the SWP header offsets.
+ * @param swp_types
+ * Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint8_t tso, uint64_t vlan,
+ uint8_t *offsets, uint8_t *swp_types)
+{
+ uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+ uint16_t idx;
+ uint16_t off;
+ const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+ PKT_TX_OUTER_IPV6;
+
+ if (likely(!tunnel || !txq->swp_en ||
+ (tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)))
+ return;
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ idx = (buf->ol_flags & ol_flags_mask) >> 52;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ idx |= 1 << 9;
+ *swp_types = mlx5_swp_types_table[idx];
+ /* SWP offsets. */
+ off = buf->outer_l2_len + (vlan ? 4 : 0); /* Outer L3 offset. */
+ if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM))
+ offsets[1] = off >> 1;
+ off += buf->outer_l3_len; /* Outer L4 offset. */
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ offsets[0] = off >> 1;
+ off += buf->l2_len; /* Inner L3 offset. */
+ if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM))
+ offsets[3] = off >> 1;
+ off += buf->l3_len; /* Inner L4 offset. */
+ if (tso || ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+ ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM))
+ offsets[2] = off >> 1;
+}
+
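To make the SWP index and offset arithmetic concrete, here is a worked example as a comment block; the flag positions are the standard rte_mbuf Tx offload bits (PKT_TX_TCP_CKSUM = 1ULL << 52, PKT_TX_IPV6 = 1ULL << 56, PKT_TX_OUTER_IPV6 = 1ULL << 60) and the packet layout is hypothetical:

/*
 * Example: TCP over inner IPv4 in a UDP tunnel with an outer IPv4
 * header, ol_flags = PKT_TX_TUNNEL_UDP | PKT_TX_OUTER_IP_CKSUM |
 * PKT_TX_TCP_CKSUM, no VLAN, no TSO.
 *
 *   idx  = (ol_flags & ol_flags_mask) >> 52;   bits[0:1] = 0x1 (TCP)
 *   idx |= 1 << 9;                             outer UDP -> idx = 0x201
 *
 * Offsets are encoded in 16-bit words, hence the ">> 1". The array is
 * laid out as { outer L4, outer L3, inner L4, inner L3 }: with
 * outer_l2_len = 14 the outer L3 header at byte 14 is stored as
 * offsets[1] = 7, and with outer_l3_len = 20 the outer L4 (tunnel UDP)
 * header at byte 34 is stored as offsets[0] = 17.
 */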
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ * Pointer to the mbuf.
*
* @return
- * the converted cs_flags.
+ * Converted checksum flags.
*/
static __rte_always_inline uint8_t
-txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
- uint8_t cs_flags = 0;
-
- /* Should we enable HW CKSUM offload */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
- PKT_TX_OUTER_IP_CKSUM)) {
- if (txq_data->tunnel_en &&
- (buf->ol_flags &
- (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
- cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
- return cs_flags;
+ uint32_t idx;
+ uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (is_tunnel << 9);
+ return mlx5_cksum_table[idx];
}
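
The generator for mlx5_cksum_table lives in mlx5_rxtx.c and is not shown in this excerpt. Below is a hedged sketch of how it can be derived from the index layout documented above, assuming the table spans the full 10-bit index (1024 entries) and reusing the MLX5_ETH_WQE_* flags that the removed inline code referenced; treat it as an illustration of the technique, not the patch's exact code:

void
mlx5_set_cksum_table(void)
{
	unsigned int i;
	uint8_t v;

	/* Enumerate every possible index and precompute the Verbs
	 * checksum flags its bits call for. */
	for (i = 0; i < 1024; ++i) {
		v = 0;
		if (i & (1 << 9)) {
			/* Tunneled packet: split outer/inner flags. */
			if (i & (1 << 8)) /* Outer IP checksum. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & (1 << 4)) /* Inner IP checksum. */
				v |= MLX5_ETH_WQE_L3_INNER_CSUM;
			if (i & ((3 << 2) | 1)) /* Inner L4 cksum or TSO. */
				v |= MLX5_ETH_WQE_L4_INNER_CSUM;
		} else {
			if (i & (1 << 4)) /* IP checksum. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & ((3 << 2) | 1)) /* L4 checksum or TSO. */
				v |= MLX5_ETH_WQE_L4_CSUM;
		}
		mlx5_cksum_table[i] = v;
	}
}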
/**