X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.c;h=c45ebeed4e5017a8d2d23f9042d2dc7b0ab4e30f;hb=c68f27a2a48f7c0276d4032e4ca8f11d4cb5ea9d;hp=688ee9028ae774c9873cbe7fcc04e773568219c1;hpb=b0b09384579357265db6cb01a0d1ee24b19b3163;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 688ee9028a..c45ebeed4e 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -42,25 +42,17 @@ #pragma GCC diagnostic ignored "-Wpedantic" #endif #include -#include -#include +#include #ifdef PEDANTIC #pragma GCC diagnostic error "-Wpedantic" #endif -/* DPDK headers don't like -pedantic. */ -#ifdef PEDANTIC -#pragma GCC diagnostic ignored "-Wpedantic" -#endif #include #include #include #include #include #include -#ifdef PEDANTIC -#pragma GCC diagnostic error "-Wpedantic" -#endif #include "mlx5.h" #include "mlx5_utils.h" @@ -69,19 +61,6 @@ #include "mlx5_defs.h" #include "mlx5_prm.h" -static __rte_always_inline int -check_cqe(volatile struct mlx5_cqe *cqe, - unsigned int cqes_n, const uint16_t ci); - -static __rte_always_inline void -txq_complete(struct txq *txq); - -static __rte_always_inline uint32_t -txq_mb2mr(struct txq *txq, struct rte_mbuf *mb); - -static __rte_always_inline void -mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe); - static __rte_always_inline uint32_t rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe); @@ -92,100 +71,121 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, static __rte_always_inline uint32_t rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe); -#ifndef NDEBUG +uint32_t mlx5_ptype_table[] __rte_cache_aligned = { + [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */ +}; /** - * Verify or set magic value in CQE. + * Build a table to translate Rx completion flags to packet type. * - * @param cqe - * Pointer to CQE. - * - * @return - * 0 the first time. + * @note: fix mlx5_dev_supported_ptypes_get() if any change here. */ -static inline int -check_cqe_seen(volatile struct mlx5_cqe *cqe) +void +mlx5_set_ptype_table(void) { - static const uint8_t magic[] = "seen"; - volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0; - int ret = 1; unsigned int i; + uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table; - for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i) - if (!ret || (*buf)[i] != magic[i]) { - ret = 0; - (*buf)[i] = magic[i]; - } - return ret; -} - -#endif /* NDEBUG */ - -/** - * Check whether CQE is valid. - * - * @param cqe - * Pointer to CQE. - * @param cqes_n - * Size of completion queue. - * @param ci - * Consumer index. - * - * @return - * 0 on success, 1 on failure. - */ -static inline int -check_cqe(volatile struct mlx5_cqe *cqe, - unsigned int cqes_n, const uint16_t ci) -{ - uint16_t idx = ci & cqes_n; - uint8_t op_own = cqe->op_own; - uint8_t op_owner = MLX5_CQE_OWNER(op_own); - uint8_t op_code = MLX5_CQE_OPCODE(op_own); - - if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID))) - return 1; /* No CQE. 
*/ -#ifndef NDEBUG - if ((op_code == MLX5_CQE_RESP_ERR) || - (op_code == MLX5_CQE_REQ_ERR)) { - volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe; - uint8_t syndrome = err_cqe->syndrome; - - if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) || - (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR)) - return 0; - if (!check_cqe_seen(cqe)) - ERROR("unexpected CQE error %u (0x%02x)" - " syndrome 0x%02x", - op_code, op_code, syndrome); - return 1; - } else if ((op_code != MLX5_CQE_RESP_SEND) && - (op_code != MLX5_CQE_REQ)) { - if (!check_cqe_seen(cqe)) - ERROR("unexpected CQE opcode %u (0x%02x)", - op_code, op_code); - return 1; - } -#endif /* NDEBUG */ - return 0; -} - -/** - * Return the address of the WQE. - * - * @param txq - * Pointer to TX queue structure. - * @param wqe_ci - * WQE consumer index. - * - * @return - * WQE address. - */ -static inline uintptr_t * -tx_mlx5_wqe(struct txq *txq, uint16_t ci) -{ - ci &= ((1 << txq->wqe_n) - 1); - return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE); + /* Last entry must not be overwritten, reserved for errored packet. */ + for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i) + (*p)[i] = RTE_PTYPE_UNKNOWN; + /* + * The index to the array should have: + * bit[1:0] = l3_hdr_type + * bit[4:2] = l4_hdr_type + * bit[5] = ip_frag + * bit[6] = tunneled + * bit[7] = outer_l3_type + */ + /* L3 */ + (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + /* Fragmented */ + (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + /* TCP */ + (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + /* UDP */ + (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + /* Repeat with outer_l3_type being set. Just in case. 
*/ + (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + /* Tunneled - L3 */ + (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + /* Tunneled - Fragmented */ + (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + /* Tunneled - TCP */ + (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + /* Tunneled - UDP */ + (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; } /** @@ -244,174 +244,6 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n, return ret; } -/** - * Manage TX completions. - * - * When sending a burst, mlx5_tx_burst() posts several WRs. - * - * @param txq - * Pointer to TX queue structure. 
- */ -static inline void -txq_complete(struct txq *txq) -{ - const uint16_t elts_n = 1 << txq->elts_n; - const uint16_t elts_m = elts_n - 1; - const unsigned int cqe_n = 1 << txq->cqe_n; - const unsigned int cqe_cnt = cqe_n - 1; - uint16_t elts_free = txq->elts_tail; - uint16_t elts_tail; - uint16_t cq_ci = txq->cq_ci; - volatile struct mlx5_cqe *cqe = NULL; - volatile struct mlx5_wqe_ctrl *ctrl; - struct rte_mbuf *m, *free[elts_n]; - struct rte_mempool *pool = NULL; - unsigned int blk_n = 0; - - do { - volatile struct mlx5_cqe *tmp; - - tmp = &(*txq->cqes)[cq_ci & cqe_cnt]; - if (check_cqe(tmp, cqe_n, cq_ci)) - break; - cqe = tmp; -#ifndef NDEBUG - if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) { - if (!check_cqe_seen(cqe)) - ERROR("unexpected compressed CQE, TX stopped"); - return; - } - if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) || - (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) { - if (!check_cqe_seen(cqe)) - ERROR("unexpected error CQE, TX stopped"); - return; - } -#endif /* NDEBUG */ - ++cq_ci; - } while (1); - if (unlikely(cqe == NULL)) - return; - txq->wqe_pi = ntohs(cqe->wqe_counter); - ctrl = (volatile struct mlx5_wqe_ctrl *) - tx_mlx5_wqe(txq, txq->wqe_pi); - elts_tail = ctrl->ctrl3; - assert((elts_tail & elts_m) < (1 << txq->wqe_n)); - /* Free buffers. */ - while (elts_free != elts_tail) { - m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]); - if (likely(m != NULL)) { - if (likely(m->pool == pool)) { - free[blk_n++] = m; - } else { - if (likely(pool != NULL)) - rte_mempool_put_bulk(pool, - (void *)free, - blk_n); - free[0] = m; - pool = m->pool; - blk_n = 1; - } - } - } - if (blk_n) - rte_mempool_put_bulk(pool, (void *)free, blk_n); -#ifndef NDEBUG - elts_free = txq->elts_tail; - /* Poisoning. */ - while (elts_free != elts_tail) { - memset(&(*txq->elts)[elts_free & elts_m], - 0x66, - sizeof((*txq->elts)[elts_free & elts_m])); - ++elts_free; - } -#endif - txq->cq_ci = cq_ci; - txq->elts_tail = elts_tail; - /* Update the consumer index. */ - rte_wmb(); - *txq->cq_db = htonl(cq_ci); -} - -/** - * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which - * the cloned mbuf is allocated is returned instead. - * - * @param buf - * Pointer to mbuf. - * - * @return - * Memory pool where data is located for given mbuf. - */ -static struct rte_mempool * -txq_mb2mp(struct rte_mbuf *buf) -{ - if (unlikely(RTE_MBUF_INDIRECT(buf))) - return rte_mbuf_from_indirect(buf)->pool; - return buf->pool; -} - -/** - * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[]. - * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, - * remove an entry first. - * - * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. - * - * @return - * mr->lkey on success, (uint32_t)-1 on failure. - */ -static inline uint32_t -txq_mb2mr(struct txq *txq, struct rte_mbuf *mb) -{ - uint16_t i = txq->mr_cache_idx; - uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t); - - assert(i < RTE_DIM(txq->mp2mr)); - if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr)) - return txq->mp2mr[i].lkey; - for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { - if (unlikely(txq->mp2mr[i].mr == NULL)) { - /* Unknown MP, add a new MR for it. 
*/ - break; - } - if (txq->mp2mr[i].start <= addr && - txq->mp2mr[i].end >= addr) { - assert(txq->mp2mr[i].lkey != (uint32_t)-1); - assert(htonl(txq->mp2mr[i].mr->lkey) == - txq->mp2mr[i].lkey); - txq->mr_cache_idx = i; - return txq->mp2mr[i].lkey; - } - } - txq->mr_cache_idx = 0; - return txq_mp2mr_reg(txq, txq_mb2mp(mb), i); -} - -/** - * Ring TX queue doorbell. - * - * @param txq - * Pointer to TX queue structure. - * @param wqe - * Pointer to the last WQE posted in the NIC. - */ -static inline void -mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe) -{ - uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); - volatile uint64_t *src = ((volatile uint64_t *)wqe); - - rte_wmb(); - *txq->qp_db = htonl(txq->wqe_ci); - /* Ensure ordering between DB record and BF copy. */ - rte_wmb(); - *dst = *src; -} - /** * DPDK callback to check the status of a tx descriptor. * @@ -429,7 +261,7 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset) struct txq *txq = tx_queue; uint16_t used; - txq_complete(txq); + mlx5_tx_complete(txq); used = txq->elts_head - txq->elts_tail; if (offset < used) return RTE_ETH_TX_DESC_FULL; @@ -473,7 +305,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) op_own = cqe->op_own; if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) - n = ntohl(cqe->byte_cnt); + n = rte_be_to_cpu_32(cqe->byte_cnt); else n = 1; cq_ci += n; @@ -525,7 +357,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Prefetch first packet cacheline. */ rte_prefetch0(*pkts); /* Start processing. */ - txq_complete(txq); + mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) @@ -573,8 +405,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) #ifdef MLX5_PMD_SOFT_COUNTERS total_length = length; #endif - if (length < (MLX5_WQE_DWORD_SIZE + 2)) + if (length < (MLX5_WQE_DWORD_SIZE + 2)) { + txq->stats.oerrors++; break; + } /* Update element. */ (*txq->elts)[elts_head & elts_m] = buf; /* Prefetch next buffer data. */ @@ -601,7 +435,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { - uint32_t vlan = htonl(0x81000000 | buf->vlan_tci); + uint32_t vlan = rte_cpu_to_be_32(0x81000000 | + buf->vlan_tci); unsigned int len = 2 * ETHER_ADDR_LEN - 2; addr += 2; @@ -621,6 +456,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) length -= pkt_inline_sz; addr += pkt_inline_sz; } + raw += MLX5_WQE_DWORD_SIZE; if (txq->tso_en) { tso = buf->ol_flags & PKT_TX_TCP_SEG; if (tso) { @@ -639,7 +475,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len; tso_segsz = buf->tso_segsz; - + if (unlikely(tso_segsz == 0)) { + txq->stats.oerrors++; + break; + } if (is_tunneled && txq->tunnel_en) { tso_header_sz += buf->outer_l2_len + buf->outer_l3_len; @@ -648,12 +487,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) cs_flags |= MLX5_ETH_WQE_L4_CSUM; } if (unlikely(tso_header_sz > - MLX5_MAX_TSO_HEADER)) + MLX5_MAX_TSO_HEADER)) { + txq->stats.oerrors++; break; + } copy_b = tso_header_sz - pkt_inline_sz; /* First seg must contain all headers. 
*/ assert(copy_b <= length); - raw += MLX5_WQE_DWORD_SIZE; if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { uint16_t n = (MLX5_WQE_DS(copy_b) - @@ -666,19 +506,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) (void *)addr, copy_b); addr += copy_b; length -= copy_b; + /* Include padding for TSO header. */ + copy_b = MLX5_WQE_DS(copy_b) * + MLX5_WQE_DWORD_SIZE; pkt_inline_sz += copy_b; - /* - * Another DWORD will be added - * in the inline part. - */ - raw += MLX5_WQE_DS(copy_b) * - MLX5_WQE_DWORD_SIZE - - MLX5_WQE_DWORD_SIZE; + raw += copy_b; } else { /* NOP WQE. */ wqe->ctrl = (rte_v128u32_t){ - htonl(txq->wqe_ci << 8), - htonl(txq->qp_num_8s | 1), + rte_cpu_to_be_32( + txq->wqe_ci << 8), + rte_cpu_to_be_32( + txq->qp_num_8s | 1), 0, 0, }; @@ -691,19 +530,20 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) } /* Inline if enough room. */ if (inline_en || tso) { + uint32_t inl; uintptr_t end = (uintptr_t) (((uintptr_t)txq->wqes) + (1 << txq->wqe_n) * MLX5_WQE_SIZE); unsigned int inline_room = max_inline * RTE_CACHE_LINE_SIZE - - (pkt_inline_sz - 2); + (pkt_inline_sz - 2) - + !!tso * sizeof(inl); uintptr_t addr_end = (addr + inline_room) & ~(RTE_CACHE_LINE_SIZE - 1); unsigned int copy_b = (addr_end > addr) ? RTE_MIN((addr_end - addr), length) : 0; - raw += MLX5_WQE_DWORD_SIZE; if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { /* * One Dseg remains in the current WQE. To @@ -717,11 +557,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) max_wqe -= n; if (tso) { uint32_t inl = - htonl(copy_b | MLX5_INLINE_SEG); + rte_cpu_to_be_32(copy_b | + MLX5_INLINE_SEG); pkt_inline_sz = MLX5_WQE_DS(tso_header_sz) * MLX5_WQE_DWORD_SIZE; + rte_memcpy((void *)raw, (void *)&inl, sizeof(inl)); raw += sizeof(inl); @@ -770,10 +612,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ - htonl(length), - txq_mb2mr(txq, buf), + rte_cpu_to_be_32(length), + mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, }; @@ -809,10 +651,10 @@ next_seg: total_length += length; #endif /* Store segment information. */ - naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ - htonl(length), - txq_mb2mr(txq, buf), + rte_cpu_to_be_32(length), + mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, }; @@ -824,27 +666,33 @@ next_seg: else j += sg; next_pkt: + if (ds > MLX5_DSEG_MAX) { + txq->stats.oerrors++; + break; + } ++elts_head; ++pkts; ++i; /* Initialize known and common part of the WQE structure. 
*/ if (tso) { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_TSO), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; wqe->eseg = (rte_v128u32_t){ 0, - cs_flags | (htons(tso_segsz) << 16), + cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), 0, - (ehdr << 16) | htons(tso_header_sz), + (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; } else { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_SEND), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; @@ -852,7 +700,7 @@ next_pkt: 0, cs_flags, 0, - (ehdr << 16) | htons(pkt_inline_sz), + (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; } next_wqe: @@ -872,7 +720,7 @@ next_wqe: comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { /* Request completion on last WQE. */ - last_wqe->ctrl2 = htonl(8); + last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; @@ -911,13 +759,14 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.rsvd0 = 0; mpw->wqe->eseg.rsvd1 = 0; mpw->wqe->eseg.rsvd2 = 0; - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *) @@ -946,7 +795,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw) * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num)); mpw->state = MLX5_MPW_STATE_CLOSED; if (num < 3) ++txq->wqe_ci; @@ -991,7 +840,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); /* Start processing. */ - txq_complete(txq); + mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) @@ -1010,8 +859,10 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (max_elts < segs_n) break; /* Do not bother with large packets MPW cannot handle. 
*/ - if (segs_n > MLX5_MPW_DSEG_MAX) + if (segs_n > MLX5_MPW_DSEG_MAX) { + txq->stats.oerrors++; break; + } max_elts -= segs_n; --pkts_n; /* Should we enable HW CKSUM offload */ @@ -1053,9 +904,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), - .lkey = txq_mb2mr(txq, buf), - .addr = htonll(addr), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), + .lkey = mlx5_tx_mb2mr(txq, buf), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1083,7 +934,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1123,12 +974,12 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.cs_flags = 0; mpw->wqe->eseg.rsvd0 = 0; @@ -1159,9 +1010,10 @@ mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw) * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(size)); mpw->state = MLX5_MPW_STATE_CLOSED; - inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG); + inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG); txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; } @@ -1214,7 +1066,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); /* Start processing. */ - txq_complete(txq); + mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); do { struct rte_mbuf *buf = *(pkts++); @@ -1231,8 +1083,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, if (max_elts < segs_n) break; /* Do not bother with large packets MPW cannot handle. */ - if (segs_n > MLX5_MPW_DSEG_MAX) + if (segs_n > MLX5_MPW_DSEG_MAX) { + txq->stats.oerrors++; break; + } max_elts -= segs_n; --pkts_n; /* @@ -1299,9 +1153,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), - .lkey = txq_mb2mr(txq, buf), - .addr = htonll(addr), + .byte_count = + rte_cpu_to_be_32(DATA_LEN(buf)), + .lkey = mlx5_tx_mb2mr(txq, buf), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1373,7 +1228,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. 
*/ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1413,9 +1268,10 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding) mpw->pkts_n = 0; mpw->total_len = sizeof(struct mlx5_wqe); mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_ENHANCED_MPSW); + mpw->wqe->ctrl[0] = + rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_ENHANCED_MPSW); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE); @@ -1423,9 +1279,9 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding) uintptr_t addr = (uintptr_t)(mpw->wqe + 1); /* Pad the first 2 DWORDs with zero-length inline header. */ - *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG); + *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG); *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) = - htonl(MLX5_INLINE_SEG); + rte_cpu_to_be_32(MLX5_INLINE_SEG); mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE; /* Start from the next WQEBB. */ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1)); @@ -1453,7 +1309,8 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw) /* Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(mpw->total_len)); mpw->state = MLX5_MPW_STATE_CLOSED; ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; txq->wqe_ci += ret; @@ -1495,7 +1352,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!pkts_n)) return 0; /* Start processing. */ - txq_complete(txq); + mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); /* A CQE slot must always be available. */ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); @@ -1520,8 +1377,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (max_elts - j < segs_n) break; /* Do not bother with large packets MPW cannot handle. */ - if (segs_n > MLX5_MPW_DSEG_MAX) + if (segs_n > MLX5_MPW_DSEG_MAX) { + txq->stats.oerrors++; break; + } /* Should we enable HW CKSUM offload. 
*/ if (buf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) @@ -1606,9 +1465,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), - .lkey = txq_mb2mr(txq, buf), - .addr = htonll(addr), + .byte_count = rte_cpu_to_be_32( + DATA_LEN(buf)), + .lkey = mlx5_tx_mb2mr(txq, buf), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1631,7 +1491,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); assert(length == DATA_LEN(buf)); - inl_hdr = htonl(length | MLX5_INLINE_SEG); + inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG); addr = rte_pktmbuf_mtod(buf, uintptr_t); mpw.data.raw = (volatile void *) ((uintptr_t)mpw.data.raw + inl_pad); @@ -1687,10 +1547,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) rte_prefetch2((void *)(addr + n * RTE_CACHE_LINE_SIZE)); - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t) { - htonl(length), - txq_mb2mr(txq, buf), + rte_cpu_to_be_32(length), + mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, }; @@ -1717,7 +1577,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1754,30 +1614,20 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) static inline uint32_t rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) { - uint32_t pkt_type; - uint16_t flags = ntohs(cqe->hdr_type_etc); + uint8_t idx; + uint8_t pinfo = cqe->pkt_info; + uint16_t ptype = cqe->hdr_type_etc; - if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) { - pkt_type = - TRANSPOSE(flags, - MLX5_CQE_RX_IPV4_PACKET, - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) | - TRANSPOSE(flags, - MLX5_CQE_RX_IPV6_PACKET, - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN); - pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ? 
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN); - } else { - pkt_type = - TRANSPOSE(flags, - MLX5_CQE_L3_HDR_TYPE_IPV6, - RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) | - TRANSPOSE(flags, - MLX5_CQE_L3_HDR_TYPE_IPV4, - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN); - } - return pkt_type; + /* + * The index to the array should have: + * bit[1:0] = l3_hdr_type + * bit[4:2] = l4_hdr_type + * bit[5] = ip_frag + * bit[6] = tunneled + * bit[7] = outer_l3_type + */ + idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10); + return mlx5_ptype_table[idx]; } /** @@ -1809,10 +1659,10 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, if (zip->ai) { volatile struct mlx5_mini_cqe8 (*mc)[8] = (volatile struct mlx5_mini_cqe8 (*)[8]) - (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt]); + (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info); - len = ntohl((*mc)[zip->ai & 7].byte_cnt); - *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result); if ((++zip->ai & 7) == 0) { /* Invalidate consumed CQEs */ idx = zip->ca; @@ -1857,10 +1707,10 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, volatile struct mlx5_mini_cqe8 (*mc)[8] = (volatile struct mlx5_mini_cqe8 (*)[8]) (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci & - cqe_cnt]); + cqe_cnt].pkt_info); /* Fix endianness. */ - zip->cqe_cnt = ntohl(cqe->byte_cnt); + zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); /* * Current mini array position is the one returned by * check_cqe64(). @@ -1875,8 +1725,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, --rxq->cq_ci; zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; /* Get packet size to return. */ - len = ntohl((*mc)[0].byte_cnt); - *rss_hash = ntohl((*mc)[0].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[0].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result); zip->ai = 1; /* Prefetch all the entries to be invalidated */ idx = zip->ca; @@ -1886,8 +1736,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, ++idx; } } else { - len = ntohl(cqe->byte_cnt); - *rss_hash = ntohl(cqe->rx_hash_res); + len = rte_be_to_cpu_32(cqe->byte_cnt); + *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res); } /* Error while receiving packet. */ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) @@ -1911,7 +1761,7 @@ static inline uint32_t rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; - uint16_t flags = ntohs(cqe->hdr_type_etc); + uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); ol_flags = TRANSPOSE(flags, @@ -2008,7 +1858,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) pkt = seg; assert(len >= (rxq->crc_present << 2)); /* Update packet information. 
*/ - pkt->packet_type = 0; + pkt->packet_type = rxq_cq_to_pkt_type(cqe); pkt->ol_flags = 0; if (rss_hash_res && rxq->rss_hash) { pkt->hash.rss = rss_hash_res; @@ -2018,7 +1868,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { pkt->ol_flags |= PKT_RX_FDIR; if (cqe->sop_drop_qpn != - htonl(MLX5_FLOW_MARK_DEFAULT)) { + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { uint32_t mark = cqe->sop_drop_qpn; pkt->ol_flags |= PKT_RX_FDIR_ID; @@ -2026,16 +1876,15 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) mlx5_flow_mark_get(mark); } } - if (rxq->csum | rxq->csum_l2tun) { - pkt->packet_type = rxq_cq_to_pkt_type(cqe); + if (rxq->csum | rxq->csum_l2tun) pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); - } if (rxq->vlan_strip && (cqe->hdr_type_etc & - htons(MLX5_CQE_VLAN_STRIPPED))) { + rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { pkt->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = ntohs(cqe->vlan_info); + pkt->vlan_tci = + rte_be_to_cpu_16(cqe->vlan_info); } if (rxq->crc_present) len -= ETHER_CRC_LEN; @@ -2051,7 +1900,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * of the buffers are already known, only the buffer address * changes. */ - wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t)); + wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); if (len > DATA_LEN(seg)) { len -= DATA_LEN(seg); ++NB_SEGS(pkt); @@ -2079,9 +1928,9 @@ skip: /* Update the consumer index. */ rxq->rq_ci = rq_ci >> sges_n; rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment packets counter. */ rxq->stats.ipackets += i; @@ -2138,3 +1987,65 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) (void)pkts_n; return 0; } + +/* + * Vectorized Rx/Tx routines are not compiled in when required vector + * instructions are not supported on a target architecture. The following null + * stubs are needed for linkage when those are not included outside of this file + * (e.g. mlx5_rxtx_vec_sse.c for x86). + */ + +uint16_t __attribute__((weak)) +mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_txq; + (void)pkts; + (void)pkts_n; + return 0; +} + +uint16_t __attribute__((weak)) +mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_txq; + (void)pkts; + (void)pkts_n; + return 0; +} + +uint16_t __attribute__((weak)) +mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_rxq; + (void)pkts; + (void)pkts_n; + return 0; +} + +int __attribute__((weak)) +priv_check_raw_vec_tx_support(struct priv *priv) +{ + (void)priv; + return -ENOTSUP; +} + +int __attribute__((weak)) +priv_check_vec_tx_support(struct priv *priv) +{ + (void)priv; + return -ENOTSUP; +} + +int __attribute__((weak)) +rxq_check_vec_support(struct rxq *rxq) +{ + (void)rxq; + return -ENOTSUP; +} + +int __attribute__((weak)) +priv_check_vec_rx_support(struct priv *priv) +{ + (void)priv; + return -ENOTSUP; +}
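
The new mlx5_ptype_table[] above is indexed with bits taken directly from the Rx CQE, per the bit-layout comment in mlx5_set_ptype_table() and the rewritten rxq_cq_to_pkt_type(). The standalone sketch below is not part of the patch; it only illustrates how those fields pack into the 8-bit index. The ptype_idx() helper and the sample field values are assumptions for demonstration — only the bit positions themselves come from the patch.

/*
 * Illustrative only: pack CQE-derived fields into the mlx5_ptype_table[]
 * index exactly as documented in mlx5_set_ptype_table():
 *   bit[1:0] = l3_hdr_type, bit[4:2] = l4_hdr_type,
 *   bit[5] = ip_frag, bit[6] = tunneled, bit[7] = outer_l3_type.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t
ptype_idx(uint8_t l3, uint8_t l4, uint8_t frag, uint8_t tun, uint8_t outer_l3)
{
	return (uint8_t)((l3 & 0x3) | ((l4 & 0x7) << 2) | ((frag & 0x1) << 5) |
			 ((tun & 0x1) << 6) | ((outer_l3 & 0x1) << 7));
}

int
main(void)
{
	/* IPv4 + TCP, no fragment, no tunnel -> index 0x06 (L3_IPV4 | L4_TCP). */
	printf("0x%02x\n", ptype_idx(2, 1, 0, 0, 0));
	/* Tunneled, outer IPv6, inner IPv6 + UDP -> index 0xc9 in the table. */
	printf("0x%02x\n", ptype_idx(1, 2, 0, 1, 1));
	return 0;
}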