-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_
#include <stddef.h>
#include <stdint.h>
+#include <sys/queue.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
-#include <infiniband/mlx5_hw.h>
+#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
+#include <rte_atomic.h>
#include "mlx5_utils.h"
#include "mlx5.h"
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
- uint64_t odropped; /**< Total of packets not sent when TX ring full. */
-};
-
-/* Flow director queue structure. */
-struct fdir_queue {
- struct ibv_qp *qp; /* Associated RX QP. */
- struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */
- struct ibv_exp_wq *wq; /* Work queue. */
- struct ibv_cq *cq; /* Completion queue. */
+ uint64_t oerrors; /**< Total number of packets that failed to be transmitted. */
};
struct priv;
+/* Memory region queue object. */
+struct mlx5_mr {
+ LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
+ rte_atomic32_t refcnt; /**< Reference counter. */
+ uint32_t lkey; /**< rte_cpu_to_be_32(mr->lkey). */
+ uintptr_t start; /**< Start address of MR. */
+ uintptr_t end; /**< End address of MR. */
+ struct ibv_mr *mr; /**< Memory Region. */
+ struct rte_mempool *mp; /**< Memory Pool. */
+};
+
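/*
 * Illustrative sketch, not part of this patch: with the fields above, a
 * hypothetical helper can test whether one cached MR covers a buffer
 * address; the registered region is the half-open range [start, end).
 */
static inline uint32_t
example_mr_lookup(const struct mlx5_mr *mr, uintptr_t addr)
{
	if (mr->start <= addr && addr < mr->end)
		return mr->lkey; /* Already stored in big-endian form. */
	return (uint32_t)-1; /* Miss: caller must register the mempool. */
}
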
/* Compressed CQE context. */
struct rxq_zip {
uint16_t ai; /* Array index. */
};
/* RX queue descriptor. */
-struct rxq {
+struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
unsigned int cqe_n:4; /* Log 2 of CQ elements. */
unsigned int elts_n:4; /* Log 2 of Mbufs. */
- unsigned int port_id:8;
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
- unsigned int pending_err:1; /* CQE error needs to be handled. */
- unsigned int :7; /* Remaining bits. */
+ unsigned int :15; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
+ uint16_t port_id;
uint16_t rq_ci;
uint16_t rq_pi;
uint16_t cq_ci;
struct mlx5_rxq_stats stats;
uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
+ void *cq_uar; /* CQ user access region. */
+ uint32_t cqn; /* CQ number. */
+ uint8_t cq_arm_sn; /* CQ arm seq number. */
} __rte_cache_aligned;
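
/*
 * Illustrative sketch, not part of this patch: sges_n, cqe_n and elts_n
 * above hold log2 values, so a hypothetical helper derives the actual
 * ring sizes by shifting.
 */
static inline unsigned int
example_rxq_mbuf_slots(const struct mlx5_rxq_data *rxq)
{
	return 1u << rxq->elts_n; /* Number of mbuf slots in the Rx ring. */
}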
-/* RX queue control descriptor. */
-struct rxq_ctrl {
- struct priv *priv; /* Back pointer to private data. */
+/* Verbs Rx queue elements. */
+struct mlx5_rxq_ibv {
+ LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_exp_wq *wq; /* Work Queue. */
- struct fdir_queue *fdir_queue; /* Flow director queue. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
+ struct ibv_wq *wq; /* Work Queue. */
struct ibv_comp_channel *channel;
- unsigned int socket; /* CPU socket ID for allocations. */
- struct rxq rxq; /* Data path structure. */
+ struct mlx5_mr *mr; /* Memory Region (for mp). */
};
-/* Hash RX queue types. */
-enum hash_rxq_type {
- HASH_RXQ_TCPV4,
- HASH_RXQ_UDPV4,
- HASH_RXQ_IPV4,
- HASH_RXQ_TCPV6,
- HASH_RXQ_UDPV6,
- HASH_RXQ_IPV6,
- HASH_RXQ_ETH,
-};
-
-/* Flow structure with Ethernet specification. It is packed to prevent padding
- * between attr and spec as this layout is expected by libibverbs. */
-struct flow_attr_spec_eth {
- struct ibv_exp_flow_attr attr;
- struct ibv_exp_flow_spec_eth spec;
-} __attribute__((packed));
-
-/* Define a struct flow_attr_spec_eth object as an array of at least
- * "size" bytes. Room after the first index is normally used to store
- * extra flow specifications. */
-#define FLOW_ATTR_SPEC_ETH(name, size) \
- struct flow_attr_spec_eth name \
- [((size) / sizeof(struct flow_attr_spec_eth)) + \
- !!((size) % sizeof(struct flow_attr_spec_eth))]
-
-/* Initialization data for hash RX queue. */
-struct hash_rxq_init {
- uint64_t hash_fields; /* Fields that participate in the hash. */
- uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
- unsigned int flow_priority; /* Flow priority to use. */
- union {
- struct {
- enum ibv_exp_flow_spec_type type;
- uint16_t size;
- } hdr;
- struct ibv_exp_flow_spec_tcp_udp tcp_udp;
- struct ibv_exp_flow_spec_ipv4 ipv4;
- struct ibv_exp_flow_spec_ipv6 ipv6;
- struct ibv_exp_flow_spec_eth eth;
- } flow_spec; /* Flow specification template. */
- const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
-};
-
-/* Initialization data for indirection table. */
-struct ind_table_init {
- unsigned int max_size; /* Maximum number of WQs. */
- /* Hash RX queues using this table. */
- unsigned int hash_types;
- unsigned int hash_types_n;
+/* RX queue control descriptor. */
+struct mlx5_rxq_ctrl {
+ LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
+ struct mlx5_rxq_data rxq; /* Data path structure. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ unsigned int irq:1; /* Whether IRQ is enabled. */
+ uint16_t idx; /* Queue index. */
};
-/* Initialization data for special flows. */
-struct special_flow_init {
- uint8_t dst_mac_val[6];
- uint8_t dst_mac_mask[6];
- unsigned int hash_types;
- unsigned int per_vlan:1;
+/* Indirection table. */
+struct mlx5_ind_table_ibv {
+ LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+ uint16_t queues_n; /**< Number of queues in the list. */
+ uint16_t queues[]; /**< Queue list. */
};
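
/*
 * Illustrative sketch, not part of this patch: queues[] is a flexible
 * array member, so an indirection table is allocated as a single chunk
 * sized for queues_n entries.  Plain calloc() (from <stdlib.h>) is used
 * here for brevity; the driver uses its own allocator.
 */
static inline struct mlx5_ind_table_ibv *
example_ind_table_alloc(const uint16_t *queues, uint16_t queues_n)
{
	struct mlx5_ind_table_ibv *ind_tbl;
	uint16_t i;

	ind_tbl = calloc(1, sizeof(*ind_tbl) +
			 queues_n * sizeof(ind_tbl->queues[0]));
	if (ind_tbl == NULL)
		return NULL;
	ind_tbl->queues_n = queues_n;
	for (i = 0; i != queues_n; ++i)
		ind_tbl->queues[i] = queues[i];
	rte_atomic32_set(&ind_tbl->refcnt, 1);
	return ind_tbl;
}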
-enum hash_rxq_flow_type {
- HASH_RXQ_FLOW_TYPE_PROMISC,
- HASH_RXQ_FLOW_TYPE_ALLMULTI,
- HASH_RXQ_FLOW_TYPE_BROADCAST,
- HASH_RXQ_FLOW_TYPE_IPV6MULTI,
- HASH_RXQ_FLOW_TYPE_MAC,
-};
-
-#ifndef NDEBUG
-static inline const char *
-hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
-{
- switch (flow_type) {
- case HASH_RXQ_FLOW_TYPE_PROMISC:
- return "promiscuous";
- case HASH_RXQ_FLOW_TYPE_ALLMULTI:
- return "allmulticast";
- case HASH_RXQ_FLOW_TYPE_BROADCAST:
- return "broadcast";
- case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
- return "IPv6 multicast";
- case HASH_RXQ_FLOW_TYPE_MAC:
- return "MAC";
- }
- return NULL;
-}
-#endif /* NDEBUG */
-
-struct hash_rxq {
- struct priv *priv; /* Back pointer to private data. */
- struct ibv_qp *qp; /* Hash RX QP. */
- enum hash_rxq_type type; /* Hash RX queue type. */
- /* MAC flow steering rules, one per VLAN ID. */
- struct ibv_exp_flow *mac_flow
- [MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
- struct ibv_exp_flow *special_flow
- [MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
+/* Hash Rx queue. */
+struct mlx5_hrxq {
+ LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+ struct ibv_qp *qp; /* Verbs queue pair. */
+ uint64_t hash_fields; /* Verbs Hash fields. */
+ uint8_t rss_key_len; /* Hash key length in bytes. */
+ uint8_t rss_key[]; /* Hash key. */
};
/* TX queue descriptor. */
__extension__
-struct txq {
+struct mlx5_txq_data {
uint16_t elts_head; /* Current counter in (*elts)[]. */
uint16_t elts_tail; /* Counter of first element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
+#ifndef NDEBUG
uint16_t cq_pi; /* Producer index for completion queue. */
+#endif
uint16_t wqe_ci; /* Consumer index for work queue. */
uint16_t wqe_pi; /* Producer index for work queue. */
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
- uint16_t inline_en:1; /* When set inline is enabled. */
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set TX offload for tunneled packets are supported. */
+ uint16_t swp_en:1; /* Whether SW parser is enabled. */
uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
+ uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
- uint32_t flags; /* Flags for Tx Queue. */
+ uint64_t offloads; /* Offloads for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
- volatile void *bf_reg; /* Blueflame register. */
- struct {
- uintptr_t start; /* Start address of MR */
- uintptr_t end; /* End address of MR */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- uint32_t lkey; /* htonl(mr->lkey) */
- } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
- uint16_t mr_cache_idx; /* Index of last hit entry. */
+ volatile void *bf_reg; /* Blueflame register remapped. */
+ struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
-/* TX queue control descriptor. */
-struct txq_ctrl {
- struct priv *priv; /* Back pointer to private data. */
+/* Verbs Tx queue elements. */
+struct mlx5_txq_ibv {
+ LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
+};
+
+/* TX queue control descriptor. */
+struct mlx5_txq_ctrl {
+ LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct priv *priv; /* Back pointer to private data. */
unsigned int socket; /* CPU socket ID for allocations. */
- struct txq txq; /* Data path structure. */
+ unsigned int max_inline_data; /* Max inline data. */
+ unsigned int max_tso_header; /* Max TSO header size. */
+ struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
+ struct mlx5_txq_data txq; /* Data path structure. */
+ off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
+ volatile void *bf_reg_orig; /* Blueflame register from verbs. */
+ uint16_t idx; /* Queue index. */
};
/* mlx5_rxq.c */
-extern const struct hash_rxq_init hash_rxq_init[];
-extern const unsigned int hash_rxq_init_n;
-
extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;
-size_t priv_flow_attr(struct priv *, struct ibv_exp_flow_attr *,
- size_t, enum hash_rxq_type);
-int priv_create_hash_rxqs(struct priv *);
-void priv_destroy_hash_rxqs(struct priv *);
-int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
-int priv_rehash_flows(struct priv *);
-void rxq_cleanup(struct rxq_ctrl *);
-int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
- const struct rte_eth_rxconf *, struct rte_mempool *);
-void mlx5_rx_queue_release(void *);
-int priv_rx_intr_vec_enable(struct priv *priv);
-void priv_rx_intr_vec_disable(struct priv *priv);
-#ifdef HAVE_UPDATE_CQ_CI
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
+int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+void mlx5_rx_queue_release(void *dpdk_rxq);
+int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
+void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-#endif /* HAVE_UPDATE_CQ_CI */
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_verify(struct rte_eth_dev *dev);
+int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
+ uint16_t queues[],
+ uint16_t queues_n);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
+ uint16_t queues[],
+ uint16_t queues_n);
+int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl);
+int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key,
+ uint8_t rss_key_len, uint64_t hash_fields,
+ uint16_t queues[], uint16_t queues_n);
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key,
+ uint8_t rss_key_len, uint64_t hash_fields,
+ uint16_t queues[], uint16_t queues_n);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
+int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+uint64_t mlx5_get_rx_port_offloads(void);
+uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
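
/*
 * Illustrative usage, not part of this patch: the *_new()/*_get()/
 * *_release() triplets declared above implement reference-counted
 * objects.  A hypothetical caller holds a reference only while it
 * accesses the queue.
 */
static inline int
example_rxq_access(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, idx);

	if (rxq_ctrl == NULL)
		return -1; /* Queue not configured. */
	/* ... use rxq_ctrl->rxq while the reference is held ... */
	return mlx5_rxq_release(dev, idx); /* Drop the reference. */
}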
/* mlx5_txq.c */
-void txq_cleanup(struct txq_ctrl *);
-int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t,
- unsigned int, const struct rte_eth_txconf *);
-int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
- const struct rte_eth_txconf *);
-void mlx5_tx_queue_release(void *);
+int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf);
+void mlx5_tx_queue_release(void *dpdk_txq);
+int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf);
+struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_verify(struct rte_eth_dev *dev);
+void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
+uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
/* mlx5_rxtx.c */
extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
void mlx5_set_ptype_table(void);
-uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
-int mlx5_rx_descriptor_status(void *, uint16_t);
-int mlx5_tx_descriptor_status(void *, uint16_t);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
+uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
+uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
/* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *);
-int priv_check_vec_tx_support(struct priv *);
-int rxq_check_vec_support(struct rxq *);
-int priv_check_vec_rx_support(struct priv *);
-uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
+int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
+int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
+uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
/* mlx5_mr.c */
-struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
-void txq_mp2mr_iter(struct rte_mempool *, void *);
-uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
+void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg);
+struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq,
+ struct rte_mempool *mp, unsigned int idx);
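
/*
 * Illustrative usage, not part of this patch: mlx5_mp2mr_iter() matches
 * the rte_mempool_walk() callback signature, so all existing mempools
 * can be registered in one pass, e.g.:
 *
 *	rte_mempool_walk(mlx5_mp2mr_iter, arg);
 */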
#ifndef NDEBUG
/**
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE error %u (0x%02x)"
- " syndrome 0x%02x",
- op_code, op_code, syndrome);
+ DRV_LOG(ERR,
+ "unexpected CQE error %u (0x%02x) syndrome"
+ " 0x%02x",
+ op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
sizeof(*err_cqe));
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE opcode %u (0x%02x)",
- op_code, op_code);
+ DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
rte_hexdump(stderr, "MLX5 CQE:",
(const void *)((uintptr_t)cqe),
sizeof(*cqe));
* WQE address.
*/
static inline uintptr_t *
-tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
ci &= ((1 << txq->wqe_n) - 1);
return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
* Pointer to TX queue structure.
*/
static __rte_always_inline void
-mlx5_tx_complete(struct txq *txq)
+mlx5_tx_complete(struct mlx5_txq_data *txq)
{
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected error CQE, TX stopped");
+ DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
rte_hexdump(stderr, "MLX5 TXQ:",
(const void *)((uintptr_t)txq->wqes),
((1 << txq->wqe_n) *
}
#endif /* NDEBUG */
++cq_ci;
- txq->wqe_pi = ntohs(cqe->wqe_counter);
+ txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
ctrl = (volatile struct mlx5_wqe_ctrl *)
tx_mlx5_wqe(txq, txq->wqe_pi);
elts_tail = ctrl->ctrl3;
txq->cq_ci = cq_ci;
txq->elts_tail = elts_tail;
/* Update the consumer index. */
- rte_wmb();
- *txq->cq_db = htonl(cq_ci);
+ rte_compiler_barrier();
+ *txq->cq_db = rte_cpu_to_be_32(cq_ci);
}
/**
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
uint16_t i = txq->mr_cache_idx;
uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
+ struct mlx5_mr *mr;
assert(i < RTE_DIM(txq->mp2mr));
- if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr))
- return txq->mp2mr[i].lkey;
+ if (likely(txq->mp2mr[i] != NULL && txq->mp2mr[i]->start <= addr &&
+ txq->mp2mr[i]->end > addr))
+ return txq->mp2mr[i]->lkey;
for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mr == NULL)) {
+ if (unlikely(txq->mp2mr[i] == NULL ||
+ txq->mp2mr[i]->mr == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
- if (txq->mp2mr[i].start <= addr &&
- txq->mp2mr[i].end >= addr) {
- assert(txq->mp2mr[i].lkey != (uint32_t)-1);
- assert(htonl(txq->mp2mr[i].mr->lkey) ==
- txq->mp2mr[i].lkey);
+ if (txq->mp2mr[i]->start <= addr &&
+ txq->mp2mr[i]->end > addr) {
+ assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
txq->mr_cache_idx = i;
- return txq->mp2mr[i].lkey;
+ return txq->mp2mr[i]->lkey;
}
}
- txq->mr_cache_idx = 0;
- return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+ mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+ /*
+ * Request the reference to use in this queue, the original one is
+ * kept by the control plane.
+ */
+ if (mr) {
+ rte_atomic32_inc(&mr->refcnt);
+ txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
+ return mr->lkey;
+ } else {
+ struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
+
+ DRV_LOG(WARNING, "failed to register mempool %p (%s)",
+ (void *)mp, mp->name);
+ }
+ return (uint32_t)-1;
}
/**
- * Ring TX queue doorbell.
+ * Ring TX queue doorbell and flush the update if requested.
*
* @param txq
* Pointer to TX queue structure.
* @param wqe
* Pointer to the last WQE posted in the NIC.
+ * @param cond
+ * Request for write memory barrier after BlueFlame update.
*/
static __rte_always_inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
+ int cond)
{
uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
volatile uint64_t *src = ((volatile uint64_t *)wqe);
- rte_wmb();
- *txq->qp_db = htonl(txq->wqe_ci);
+ rte_cio_wmb();
+ *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
*dst = *src;
+ if (cond)
+ rte_wmb();
+}
+
+/**
+ * Ring TX queue doorbell and flush the update by write memory barrier.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the last WQE posted in the NIC.
+ */
+static __rte_always_inline void
+mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
+{
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
+}
+
+/**
+ * Convert mbuf to Verbs SWP.
+ *
+ * @param txq
+ * Pointer to the Tx queue.
+ * @param buf
+ * Pointer to the mbuf.
+ * @param tso
+ * TSO offloads enabled.
+ * @param vlan
+ * VLAN offloads enabled.
+ * @param offsets
+ * Pointer to the SWP header offsets.
+ * @param swp_types
+ * Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint8_t tso, uint64_t vlan,
+ uint8_t *offsets, uint8_t *swp_types)
+{
+ uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+ uint16_t idx;
+ uint16_t off;
+ const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+ PKT_TX_OUTER_IPV6;
+
+ if (likely(!tunnel || !txq->swp_en ||
+ (tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)))
+ return;
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ idx = (buf->ol_flags & ol_flags_mask) >> 52;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ idx |= 1 << 9;
+ *swp_types = mlx5_swp_types_table[idx];
+ /* swp offsets. */
+ off = buf->outer_l2_len + (vlan ? 4 : 0); /* Outer L3 offset. */
+ if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM))
+ offsets[1] = off >> 1;
+ off += buf->outer_l3_len; /* Outer L4 offset. */
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ offsets[0] = off >> 1;
+ off += buf->l2_len; /* Inner L3 offset. */
+ if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM))
+ offsets[3] = off >> 1;
+ off += buf->l3_len; /* Inner L4 offset. */
+ if (tso || ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+ ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM))
+ offsets[2] = off >> 1;
+}
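
/*
 * Illustrative usage, not part of this patch: a hypothetical Tx path
 * collects the SWP offsets (in 16-bit words) and types for one packet
 * before copying them into the WQE.
 */
static inline void
example_swp_usage(struct mlx5_txq_data *txq, struct rte_mbuf *buf)
{
	uint8_t swp_offsets[4] = { 0, 0, 0, 0 };
	uint8_t swp_types = 0;
	uint8_t tso = !!(buf->ol_flags & PKT_TX_TCP_SEG);
	uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;

	txq_mbuf_to_swp(txq, buf, tso, vlan, swp_offsets, &swp_types);
	/* swp_offsets[] and swp_types are now ready for the WQE. */
}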
+
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ * Pointer to the mbuf.
+ *
+ * @return
+ * Converted checksum flags.
+ */
+static __rte_always_inline uint8_t
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
+{
+ uint32_t idx;
+ uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (is_tunnel << 9);
+ return mlx5_cksum_table[idx];
+}
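
/*
 * Worked example, not part of this patch: a non-tunneled packet with
 * PKT_TX_IP_CKSUM (bit 54) and PKT_TX_TCP_CKSUM (bits 52:53 = 01) gives,
 * after the shift by 50, idx = 0x10 | 0x04 = 0x14, so the entry
 * mlx5_cksum_table[0x14] holds the flags for that offload combination.
 */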
+
+/**
+ * Count the number of contiguous single segment packets.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ * Number of contiguous single segment packets.
+ */
+static __rte_always_inline unsigned int
+txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of contiguous single segment packets. */
+ for (pos = 0; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) > 1)
+ break;
+ return pos;
+}
+
+/**
+ * Count the number of contiguous multi-segment packets.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ * Number of contiguous multi-segment packets.
+ */
+static __rte_always_inline unsigned int
+txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of contiguous multi-segment packets. */
+ for (pos = 0; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) == 1)
+ break;
+ return pos;
}
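
/*
 * Illustrative usage, not part of this patch: a hypothetical burst
 * routine alternates between the two paths by consuming contiguous
 * runs of single- and multi-segment packets.
 */
static inline uint16_t
example_tx_split(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t done = 0;

	while (pkts_n) {
		unsigned int n = txq_count_contig_single_seg(pkts, pkts_n);

		/* ... send pkts[0 .. n-1] on the single-segment path ... */
		pkts += n;
		pkts_n -= n;
		done += n;
		n = txq_count_contig_multi_seg(pkts, pkts_n);
		/* ... send pkts[0 .. n-1] on the multi-segment path ... */
		pkts += n;
		pkts_n -= n;
		done += n;
	}
	return done;
}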
#endif /* RTE_PMD_MLX5_RXTX_H_ */