static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
+static int
+mlx5_queue_state_modify(struct rte_eth_dev *dev,
+ struct mlx5_mp_arg_queue_state_modify *sm);
+
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
}
}
-/**
- * Return the size of tailroom of WQ.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param addr
- * Pointer to tail of WQ.
- *
- * @return
- * Size of tailroom.
- */
-static inline size_t
-tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
-{
- size_t tailroom;
- tailroom = (uintptr_t)(txq->wqes) +
- (1 << txq->wqe_n) * MLX5_WQE_SIZE -
- (uintptr_t)addr;
- return tailroom;
-}
-
-/**
- * Copy data to tailroom of circular queue.
- *
- * @param dst
- * Pointer to destination.
- * @param src
- * Pointer to source.
- * @param n
- * Number of bytes to copy.
- * @param base
- * Pointer to head of queue.
- * @param tailroom
- * Size of tailroom from dst.
- *
- * @return
- * Pointer after copied data.
- */
-static inline void *
-mlx5_copy_to_wq(void *dst, const void *src, size_t n,
- void *base, size_t tailroom)
-{
- void *ret;
-
- if (n > tailroom) {
- rte_memcpy(dst, src, tailroom);
- rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
- n - tailroom);
- ret = (uint8_t *)base + n - tailroom;
- } else {
- rte_memcpy(dst, src, n);
- ret = (n == tailroom) ? base : (uint8_t *)dst + n;
- }
- return ret;
-}
-
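The pair of helpers deleted above, tx_mlx5_wq_tailroom() and mlx5_copy_to_wq(), implemented a wrap-aware copy into the circular work queue. For readers following the removal, here is a minimal standalone sketch of the same logic; the names (ring_copy, RING_SIZE) are illustrative only, not driver identifiers.

```c
/* Editorial sketch, not part of the patch: the wrap-aware copy that
 * mlx5_copy_to_wq() performed, reduced to a standalone ring buffer.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16 /* assume a power-of-two ring, like the WQ */

static uint8_t ring[RING_SIZE];

/* Copy n bytes starting at offset dst; when the tailroom (bytes left
 * before the end of the ring) runs out, continue from offset 0.
 * Returns the offset just past the copied data, as the original did.
 */
static size_t
ring_copy(size_t dst, const void *src, size_t n)
{
	size_t tailroom = RING_SIZE - dst;

	if (n > tailroom) {
		memcpy(ring + dst, src, tailroom);
		memcpy(ring, (const uint8_t *)src + tailroom, n - tailroom);
		return n - tailroom;
	}
	memcpy(ring + dst, src, n);
	return (n == tailroom) ? 0 : dst + n;
}

int
main(void)
{
	size_t next = ring_copy(12, "ABCDEFGH", 8); /* wraps after 4 bytes */

	printf("next offset = %zu, ring[0..3] = %.4s\n",
	       next, (const char *)ring);
	return 0;
}
```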
-/**
- * Inline TSO headers into WQE.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
- uint32_t *length,
- uintptr_t *addr,
- uint16_t *pkt_inline_sz,
- uint8_t **raw,
- uint16_t *max_wqe,
- uint16_t *tso_segsz,
- uint16_t *tso_header_sz)
-{
- uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
- (1 << txq->wqe_n) * MLX5_WQE_SIZE);
- unsigned int copy_b;
- uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
- const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
- PKT_TX_TUNNEL_MASK);
- uint16_t n_wqe;
-
- *tso_segsz = buf->tso_segsz;
- *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
- if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
- txq->stats.oerrors++;
- return -EINVAL;
- }
- if (tunneled)
- *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
- /* First seg must contain all TSO headers. */
- if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) ||
- *tso_header_sz > DATA_LEN(buf)) {
- txq->stats.oerrors++;
- return -EINVAL;
- }
- copy_b = *tso_header_sz - *pkt_inline_sz;
- if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
- return -EAGAIN;
- n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
- if (unlikely(*max_wqe < n_wqe))
- return -EINVAL;
- *max_wqe -= n_wqe;
- rte_memcpy((void *)*raw, (void *)*addr, copy_b);
- *length -= copy_b;
- *addr += copy_b;
- copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
- *pkt_inline_sz += copy_b;
- *raw += copy_b;
- return 0;
-}
-
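The WQE-budget arithmetic in the removed inline_tso() is terse: MLX5_WQE_DS() rounds bytes up to 16-byte data segments, and `(ds - 1 + 3) / 4` converts those to 64-byte WQE basic blocks while leaving one DS counted against the current WQE. A small worked example, assuming the driver's usual constants (MLX5_WQE_DWORD_SIZE == 16, four DS per WQEBB):

```c
/* Editorial sketch of the removed inline_tso() WQE accounting; the
 * constants mirror what the driver's mlx5_prm.h is assumed to define.
 */
#include <stdio.h>

#define WQE_DWORD_SIZE 16 /* bytes per data segment (DS) */
#define WQE_DS(b) (((b) + WQE_DWORD_SIZE - 1) / WQE_DWORD_SIZE)

int
main(void)
{
	unsigned int copy_b = 98; /* e.g. 98 bytes of TSO headers */
	/* DS needed minus the one already counted, rounded up to whole
	 * WQE basic blocks of four DS each.
	 */
	unsigned int n_wqe = (WQE_DS(copy_b) - 1 + 3) / 4;

	printf("%u bytes -> %u DS -> %u extra WQEBB(s)\n",
	       copy_b, WQE_DS(copy_b), n_wqe); /* 98 -> 7 DS -> 2 WQEBBs */
	return 0;
}
```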
-/**
- * DPDK callback to check the status of a tx descriptor.
- *
- * @param tx_queue
- * The tx queue.
- * @param[in] offset
- * The index of the descriptor in the ring.
- *
- * @return
- * The status of the tx descriptor.
- */
-int
-mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
-{
- struct mlx5_txq_data *txq = tx_queue;
- uint16_t used;
-
- mlx5_tx_complete(txq);
- used = txq->elts_head - txq->elts_tail;
- if (offset < used)
- return RTE_ETH_TX_DESC_FULL;
- return RTE_ETH_TX_DESC_DONE;
-}
-
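The removed mlx5_tx_descriptor_status() leaned on unsigned 16-bit wraparound: `elts_head - elts_tail` yields the ring occupancy even after both free-running uint16_t counters overflow. A minimal demonstration:

```c
/* Editorial sketch: modular occupancy of a ring indexed by
 * free-running 16-bit counters, as in the removed descriptor-status
 * callback.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t head = 5;     /* head has wrapped past 65535 */
	uint16_t tail = 65530; /* tail has not wrapped yet */
	uint16_t used = head - tail;

	printf("used = %u\n", used); /* prints 11 */
	return 0;
}
```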
/**
* Internal function to compute the number of used descriptors in an RX queue
*
}
/**
- * Move QP from error state to running state.
+ * Move QP from error state to running state and initialize indexes.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param qp
- * The qp pointer for recovery.
+ * @param txq_ctrl
+ * Pointer to TX queue control structure.
*
* @return
- * 0 on success, else errno value.
+ * 0 on success, else -1.
*/
static int
-tx_recover_qp(struct mlx5_txq_data *txq, struct ibv_qp *qp)
+tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
{
- int ret;
- struct ibv_qp_attr mod = {
- .qp_state = IBV_QPS_RESET,
- .port_num = 1,
- };
- ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
- if (ret) {
- DRV_LOG(ERR, "Cannot change the Tx QP state to RESET %d\n",
- ret);
- return ret;
- }
- mod.qp_state = IBV_QPS_INIT;
- ret = mlx5_glue->modify_qp(qp, &mod,
- (IBV_QP_STATE | IBV_QP_PORT));
- if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to INIT %d\n", ret);
- return ret;
- }
- mod.qp_state = IBV_QPS_RTR;
- ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
- if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to RTR %d\n", ret);
- return ret;
- }
- mod.qp_state = IBV_QPS_RTS;
- ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
- if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to RTS %d\n", ret);
- return ret;
- }
- txq->wqe_ci = 0;
- txq->wqe_pi = 0;
- txq->elts_comp = 0;
+ struct mlx5_mp_arg_queue_state_modify sm = {
+ .is_wq = 0,
+ .queue_id = txq_ctrl->txq.idx,
+ };
+
+ if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
+ return -1;
+ txq_ctrl->txq.wqe_ci = 0;
+ txq_ctrl->txq.wqe_pi = 0;
+ txq_ctrl->txq.elts_comp = 0;
return 0;
}
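The rewritten tx_recover_qp() no longer touches Verbs directly; it fills a mlx5_mp_arg_queue_state_modify request and goes through mlx5_queue_state_modify(), declared at the top of this patch and defined in a later hunk, so a secondary process can delegate the QP transitions to the primary. A plausible shape of that dispatcher follows; the callee names mlx5_queue_state_modify_primary() and mlx5_mp_req_queue_state_modify() are assumptions based on the rest of the series, not definitions made by this hunk.

```c
/* Editorial sketch of the dispatcher this helper relies on; the two
 * callees are assumed from the rest of the series and are not defined
 * in this hunk.
 */
static int
mlx5_queue_state_modify(struct rte_eth_dev *dev,
			struct mlx5_mp_arg_queue_state_modify *sm)
{
	int ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		/* The primary owns the Verbs objects and acts directly. */
		ret = mlx5_queue_state_modify_primary(dev, sm);
		break;
	case RTE_PROC_SECONDARY:
		/* A secondary asks the primary over the MP IPC channel. */
		ret = mlx5_mp_req_queue_state_modify(dev, sm);
		break;
	default:
		break;
	}
	return ret;
}
```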
MKSTR(err_str, "Unexpected CQE error syndrome "
"0x%02x CQN = %u SQN = %u wqe_counter = %u "
"wq_ci = %u cq_ci = %u", err_cqe->syndrome,
- txq_ctrl->cqn, txq->qp_num_8s >> 8,
+ txq->cqe_s, txq->qp_num_8s >> 8,
rte_be_to_cpu_16(err_cqe->wqe_counter),
txq->wqe_ci, txq->cq_ci);
MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
mlx5_dump_debug_information(name, NULL, err_str, 0);
mlx5_dump_debug_information(name, "MLX5 Error CQ:",
(const void *)((uintptr_t)
- &(*txq->cqes)[0]),
+ txq->cqes),
sizeof(*err_cqe) *
(1 << txq->cqe_n));
mlx5_dump_debug_information(name, "MLX5 Error SQ:",
(const void *)((uintptr_t)
- tx_mlx5_wqe(txq, 0)),
+ txq->wqes),
MLX5_WQE_SIZE *
(1 << txq->wqe_n));
txq_ctrl->dump_file_n++;
*/
txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
new_wqe_pi) & wqe_m;
- if ((rte_eal_process_type() == RTE_PROC_PRIMARY) &&
- tx_recover_qp(txq, txq_ctrl->ibv->qp) == 0) {
+ if (tx_recover_qp(txq_ctrl) == 0) {
txq->cq_ci++;
/* Release all the remaining buffers. */
return txq->elts_head;
return txq->elts_tail;
}
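The oerrors accounting a few lines above computes, modulo the ring size, how many WQEs sit between the producer index reported by the error CQE and the current consumer index. A standalone check of that masked difference (ring size chosen for illustration):

```c
/* Editorial sketch of the oerrors arithmetic: distance between a
 * masked consumer index and the CQE-reported producer index, modulo a
 * power-of-two ring size.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint16_t wqe_n = 256;       /* assumed ring size */
	const uint16_t wqe_m = wqe_n - 1; /* index mask */
	uint16_t wqe_ci = 300;            /* free-running consumer index */
	uint16_t new_wqe_pi = 10;         /* producer index from the CQE */
	uint16_t dropped = ((wqe_ci & wqe_m) - new_wqe_pi) & wqe_m;

	printf("WQEs counted as errored: %u\n", dropped); /* prints 34 */
	return 0;
}
```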
-/**
- * DPDK callback for TX.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- unsigned int i = 0;
- unsigned int j = 0;
- unsigned int k = 0;
- uint16_t max_elts;
- uint16_t max_wqe;
- unsigned int comp;
- volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
- unsigned int segs_n = 0;
- const unsigned int max_inline = txq->max_inline;
- uint64_t addr_64;
-
- if (unlikely(!pkts_n))
- return 0;
- /* Prefetch first packet cacheline. */
- rte_prefetch0(*pkts);
- /* Start processing. */
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- if (unlikely(!max_wqe))
- return 0;
- do {
- struct rte_mbuf *buf = *pkts; /* First_seg. */
- uint8_t *raw;
- volatile struct mlx5_wqe_v *wqe = NULL;
- volatile rte_v128u32_t *dseg = NULL;
- uint32_t length;
- unsigned int ds = 0;
- unsigned int sg = 0; /* counter of additional segs attached. */
- uintptr_t addr;
- uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
- uint16_t tso_header_sz = 0;
- uint16_t ehdr;
- uint8_t cs_flags;
- uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
- uint32_t swp_offsets = 0;
- uint8_t swp_types = 0;
- rte_be32_t metadata;
- uint16_t tso_segsz = 0;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- uint32_t total_length = 0;
-#endif
- int ret;
-
- segs_n = buf->nb_segs;
- /*
- * Make sure there is enough room to store this packet and
- * that one ring entry remains unused.
- */
- assert(segs_n);
- if (max_elts < segs_n)
- break;
- max_elts -= segs_n;
- sg = --segs_n;
- if (unlikely(--max_wqe == 0))
- break;
- wqe = (volatile struct mlx5_wqe_v *)
- tx_mlx5_wqe(txq, txq->wqe_ci);
- rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
- if (pkts_n - i > 1)
- rte_prefetch0(*(pkts + 1));
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- length = DATA_LEN(buf);
- ehdr = (((uint8_t *)addr)[1] << 8) |
- ((uint8_t *)addr)[0];
-#ifdef MLX5_PMD_SOFT_COUNTERS
- total_length = length;
-#endif
- if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
- txq->stats.oerrors++;
- break;
- }
- /* Update element. */
- (*txq->elts)[elts_head & elts_m] = buf;
- /* Prefetch next buffer data. */
- if (pkts_n - i > 1)
- rte_prefetch0(
- rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
- cs_flags = txq_ol_cksum_to_cs(buf);
- txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
- raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
- /* Copy metadata from mbuf if valid */
- metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
- 0;
- /* Replace the Ethernet type by the VLAN if necessary. */
- if (buf->ol_flags & PKT_TX_VLAN_PKT) {
- uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
- buf->vlan_tci);
- unsigned int len = 2 * RTE_ETHER_ADDR_LEN - 2;
-
- addr += 2;
- length -= 2;
- /* Copy Destination and source mac address. */
- memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
- /* Copy VLAN. */
- memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
- /* Copy missing two bytes to end the DSeg. */
- memcpy((uint8_t *)raw + len + sizeof(vlan),
- ((uint8_t *)addr) + len, 2);
- addr += len + 2;
- length -= (len + 2);
- } else {
- memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
- MLX5_WQE_DWORD_SIZE);
- length -= pkt_inline_sz;
- addr += pkt_inline_sz;
- }
- raw += MLX5_WQE_DWORD_SIZE;
- if (tso) {
- ret = inline_tso(txq, buf, &length,
- &addr, &pkt_inline_sz,
- &raw, &max_wqe,
- &tso_segsz, &tso_header_sz);
- if (ret == -EINVAL) {
- break;
- } else if (ret == -EAGAIN) {
- /* NOP WQE. */
- wqe->ctrl = (rte_v128u32_t){
- rte_cpu_to_be_32(txq->wqe_ci << 8),
- rte_cpu_to_be_32(txq->qp_num_8s | 1),
- rte_cpu_to_be_32
- (MLX5_COMP_ONLY_FIRST_ERR <<
- MLX5_COMP_MODE_OFFSET),
- 0,
- };
- ds = 1;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- total_length = 0;
-#endif
- k++;
- goto next_wqe;
- }
- }
- /* Inline if enough room. */
- if (max_inline || tso) {
- uint32_t inl = 0;
- uintptr_t end = (uintptr_t)
- (((uintptr_t)txq->wqes) +
- (1 << txq->wqe_n) * MLX5_WQE_SIZE);
- unsigned int inline_room = max_inline *
- RTE_CACHE_LINE_SIZE -
- (pkt_inline_sz - 2) -
- !!tso * sizeof(inl);
- uintptr_t addr_end;
- unsigned int copy_b;
-
-pkt_inline:
- addr_end = RTE_ALIGN_FLOOR(addr + inline_room,
- RTE_CACHE_LINE_SIZE);
- copy_b = (addr_end > addr) ?
- RTE_MIN((addr_end - addr), length) : 0;
- if (copy_b && ((end - (uintptr_t)raw) >
- (copy_b + sizeof(inl)))) {
- /*
- * One Dseg remains in the current WQE. To
- * keep the computation positive, it is
- * removed after the bytes to Dseg conversion.
- */
- uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
-
- if (unlikely(max_wqe < n))
- break;
- max_wqe -= n;
- if (tso) {
- assert(inl == 0);
- inl = rte_cpu_to_be_32(copy_b |
- MLX5_INLINE_SEG);
- rte_memcpy((void *)raw,
- (void *)&inl, sizeof(inl));
- raw += sizeof(inl);
- pkt_inline_sz += sizeof(inl);
- }
- rte_memcpy((void *)raw, (void *)addr, copy_b);
- addr += copy_b;
- length -= copy_b;
- pkt_inline_sz += copy_b;
- }
- /*
- * 2 DWORDs consumed by the WQE header + ETH segment +
- * the size of the inline part of the packet.
- */
- ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
- if (length > 0) {
- if (ds % (MLX5_WQE_SIZE /
- MLX5_WQE_DWORD_SIZE) == 0) {
- if (unlikely(--max_wqe == 0))
- break;
- dseg = (volatile rte_v128u32_t *)
- tx_mlx5_wqe(txq, txq->wqe_ci +
- ds / 4);
- } else {
- dseg = (volatile rte_v128u32_t *)
- ((uintptr_t)wqe +
- (ds * MLX5_WQE_DWORD_SIZE));
- }
- goto use_dseg;
- } else if (!segs_n) {
- goto next_pkt;
- } else {
- /*
- * Further inline the next segment only for
- * non-TSO packets.
- */
- if (!tso) {
- raw += copy_b;
- inline_room -= copy_b;
- } else {
- inline_room = 0;
- }
- /* Move to the next segment. */
- --segs_n;
- buf = buf->next;
- assert(buf);
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- length = DATA_LEN(buf);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- total_length += length;
-#endif
- (*txq->elts)[++elts_head & elts_m] = buf;
- goto pkt_inline;
- }
- } else {
- /*
- * No inline has been done in the packet, only the
- * Ethernet Header as been stored.
- */
- dseg = (volatile rte_v128u32_t *)
- ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
- ds = 3;
-use_dseg:
- /* Add the remaining packet as a simple ds. */
- addr_64 = rte_cpu_to_be_64(addr);
- *dseg = (rte_v128u32_t){
- rte_cpu_to_be_32(length),
- mlx5_tx_mb2mr(txq, buf),
- addr_64,
- addr_64 >> 32,
- };
- ++ds;
- if (!segs_n)
- goto next_pkt;
- }
-next_seg:
- assert(buf);
- assert(ds);
- assert(wqe);
- /*
- * Spill on next WQE when the current one does not have
- * enough room left. Size of WQE must be a multiple
- * of data segment size.
- */
- assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
- if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
- if (unlikely(--max_wqe == 0))
- break;
- dseg = (volatile rte_v128u32_t *)
- tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
- rte_prefetch0(tx_mlx5_wqe(txq,
- txq->wqe_ci + ds / 4 + 1));
- } else {
- ++dseg;
- }
- ++ds;
- buf = buf->next;
- assert(buf);
- length = DATA_LEN(buf);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- total_length += length;
-#endif
- /* Store segment information. */
- addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
- *dseg = (rte_v128u32_t){
- rte_cpu_to_be_32(length),
- mlx5_tx_mb2mr(txq, buf),
- addr_64,
- addr_64 >> 32,
- };
- (*txq->elts)[++elts_head & elts_m] = buf;
- if (--segs_n)
- goto next_seg;
-next_pkt:
- if (ds > MLX5_DSEG_MAX) {
- txq->stats.oerrors++;
- break;
- }
- ++elts_head;
- ++pkts;
- ++i;
- j += sg;
- /* Initialize known and common part of the WQE structure. */
- if (tso) {
- wqe->ctrl = (rte_v128u32_t){
- rte_cpu_to_be_32((txq->wqe_ci << 8) |
- MLX5_OPCODE_TSO),
- rte_cpu_to_be_32(txq->qp_num_8s | ds),
- rte_cpu_to_be_32(MLX5_COMP_ONLY_FIRST_ERR <<
- MLX5_COMP_MODE_OFFSET),
- 0,
- };
- wqe->eseg = (rte_v128u32_t){
- swp_offsets,
- cs_flags | (swp_types << 8) |
- (rte_cpu_to_be_16(tso_segsz) << 16),
- metadata,
- (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
- };
- } else {
- wqe->ctrl = (rte_v128u32_t){
- rte_cpu_to_be_32((txq->wqe_ci << 8) |
- MLX5_OPCODE_SEND),
- rte_cpu_to_be_32(txq->qp_num_8s | ds),
- rte_cpu_to_be_32(MLX5_COMP_ONLY_FIRST_ERR <<
- MLX5_COMP_MODE_OFFSET),
- 0,
- };
- wqe->eseg = (rte_v128u32_t){
- swp_offsets,
- cs_flags | (swp_types << 8),
- metadata,
- (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
- };
- }
-next_wqe:
- txq->wqe_ci += (ds + 3) / 4;
- /* Save the last successful WQE for completion request */
- last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent bytes counter. */
- txq->stats.obytes += total_length;
-#endif
- } while (i < pkts_n);
- /* Take a shortcut if nothing must be sent. */
- if (unlikely((i + k) == 0))
- return 0;
- txq->elts_head += (i + j);
- /* Check whether completion threshold has been reached. */
- comp = txq->elts_comp + i + j + k;
- if (comp >= MLX5_TX_COMP_THRESH) {
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
- /* Request completion on last WQE. */
- last_wqe->ctrl2 = rte_cpu_to_be_32(MLX5_COMP_ALWAYS <<
- MLX5_COMP_MODE_OFFSET);
- /* Save elts_head in unused "immediate" field of WQE. */
- last_wqe->ctrl3 = txq->elts_head;
- txq->elts_comp = 0;
- } else {
- txq->elts_comp = comp;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent packets counter. */
- txq->stats.opackets += i;
-#endif
- /* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
- return i;
-}
-
-/**
- * Open a MPW session.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param mpw
- * Pointer to MPW session structure.
- * @param length
- * Packet length.
- */
-static inline void
-mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
-{
- uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
- volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
- (volatile struct mlx5_wqe_data_seg (*)[])
- tx_mlx5_wqe(txq, idx + 1);
-
- mpw->state = MLX5_MPW_STATE_OPENED;
- mpw->pkts_n = 0;
- mpw->len = length;
- mpw->total_len = 0;
- mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
- mpw->wqe->eseg.inline_hdr_sz = 0;
- mpw->wqe->eseg.rsvd0 = 0;
- mpw->wqe->eseg.rsvd1 = 0;
- mpw->wqe->eseg.flow_table_metadata = 0;
- mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_TSO);
- mpw->wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ONLY_FIRST_ERR <<
- MLX5_COMP_MODE_OFFSET);
- mpw->wqe->ctrl[3] = 0;
- mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
- (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
- mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
- (((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
- mpw->data.dseg[2] = &(*dseg)[0];
- mpw->data.dseg[3] = &(*dseg)[1];
- mpw->data.dseg[4] = &(*dseg)[2];
-}
-
-/**
- * Close a MPW session.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param mpw
- * Pointer to MPW session structure.
- */
-static inline void
-mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
-{
- unsigned int num = mpw->pkts_n;
-
- /*
- * Store size in multiple of 16 bytes. Control and Ethernet segments
- * count as 2.
- */
- mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
- mpw->state = MLX5_MPW_STATE_CLOSED;
- if (num < 3)
- ++txq->wqe_ci;
- else
- txq->wqe_ci += 2;
- rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
- rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-}
-
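The `num < 3` test in the removed mlx5_mpw_close() falls out of the DS accounting stated in its comment: control plus Ethernet segments count as 2 DS, each packet adds one, and a 64-byte WQEBB holds four 16-byte DS, so a session spills into a second WQEBB from the third packet on. A quick check, with the constants assumed from the driver:

```c
/* Editorial check of legacy MPW WQEBB consumption; MLX5_WQE_SIZE is
 * assumed to be 64 and MLX5_WQE_DWORD_SIZE 16, with at most 5 DS per
 * session (MLX5_MPW_DSEG_MAX).
 */
#include <stdio.h>

int
main(void)
{
	unsigned int num;

	for (num = 1; num <= 5; num++) {
		unsigned int ds = 2 + num;         /* ctrl + eseg count as 2 */
		unsigned int wqebb = (ds + 3) / 4; /* 4 DS per 64B WQEBB */

		printf("pkts=%u ds=%u wqebbs=%u\n", num, ds, wqebb);
	}
	return 0;
}
```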
-/**
- * DPDK callback for TX with MPW support.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- unsigned int i = 0;
- unsigned int j = 0;
- uint16_t max_elts;
- uint16_t max_wqe;
- unsigned int comp;
- struct mlx5_mpw mpw = {
- .state = MLX5_MPW_STATE_CLOSED,
- };
-
- if (unlikely(!pkts_n))
- return 0;
- /* Prefetch first packet cacheline. */
- rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
- rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
- /* Start processing. */
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- if (unlikely(!max_wqe))
- return 0;
- do {
- struct rte_mbuf *buf = *(pkts++);
- uint32_t length;
- unsigned int segs_n = buf->nb_segs;
- uint32_t cs_flags;
- rte_be32_t metadata;
-
- /*
- * Make sure there is enough room to store this packet and
- * that one ring entry remains unused.
- */
- assert(segs_n);
- if (max_elts < segs_n)
- break;
- /* Do not bother with large packets MPW cannot handle. */
- if (segs_n > MLX5_MPW_DSEG_MAX) {
- txq->stats.oerrors++;
- break;
- }
- max_elts -= segs_n;
- --pkts_n;
- cs_flags = txq_ol_cksum_to_cs(buf);
- /* Copy metadata from mbuf if valid */
- metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
- 0;
- /* Retrieve packet information. */
- length = PKT_LEN(buf);
- assert(length);
- /* Start new session if packet differs. */
- if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
- ((mpw.len != length) ||
- (segs_n != 1) ||
- (mpw.wqe->eseg.flow_table_metadata != metadata) ||
- (mpw.wqe->eseg.cs_flags != cs_flags)))
- mlx5_mpw_close(txq, &mpw);
- if (mpw.state == MLX5_MPW_STATE_CLOSED) {
- /*
- * Multi-Packet WQE consumes at most two WQE.
- * mlx5_mpw_new() expects to be able to use such
- * resources.
- */
- if (unlikely(max_wqe < 2))
- break;
- max_wqe -= 2;
- mlx5_mpw_new(txq, &mpw, length);
- mpw.wqe->eseg.cs_flags = cs_flags;
- mpw.wqe->eseg.flow_table_metadata = metadata;
- }
- /* Multi-segment packets must be alone in their MPW. */
- assert((segs_n == 1) || (mpw.pkts_n == 0));
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
- length = 0;
-#endif
- do {
- volatile struct mlx5_wqe_data_seg *dseg;
- uintptr_t addr;
-
- assert(buf);
- (*txq->elts)[elts_head++ & elts_m] = buf;
- dseg = mpw.data.dseg[mpw.pkts_n];
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- *dseg = (struct mlx5_wqe_data_seg){
- .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
- .lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = rte_cpu_to_be_64(addr),
- };
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
- length += DATA_LEN(buf);
-#endif
- buf = buf->next;
- ++mpw.pkts_n;
- ++j;
- } while (--segs_n);
- assert(length == mpw.len);
- if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
- mlx5_mpw_close(txq, &mpw);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent bytes counter. */
- txq->stats.obytes += length;
-#endif
- ++i;
- } while (pkts_n);
- /* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
- return 0;
- /* Check whether completion threshold has been reached. */
- /* "j" includes both packets and segments. */
- comp = txq->elts_comp + j;
- if (comp >= MLX5_TX_COMP_THRESH) {
- volatile struct mlx5_wqe *wqe = mpw.wqe;
-
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
- /* Request completion on last WQE. */
- wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ALWAYS <<
- MLX5_COMP_MODE_OFFSET);
- /* Save elts_head in unused "immediate" field of WQE. */
- wqe->ctrl[3] = elts_head;
- txq->elts_comp = 0;
- } else {
- txq->elts_comp = comp;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent packets counter. */
- txq->stats.opackets += i;
-#endif
- /* Ring QP doorbell. */
- if (mpw.state == MLX5_MPW_STATE_OPENED)
- mlx5_mpw_close(txq, &mpw);
- mlx5_tx_dbrec(txq, mpw.wqe);
- txq->elts_head = elts_head;
- return i;
-}
-
-/**
- * Open a MPW inline session.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param mpw
- * Pointer to MPW session structure.
- * @param length
- * Packet length.
- */
-static inline void
-mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
- uint32_t length)
-{
- uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
- struct mlx5_wqe_inl_small *inl;
-
- mpw->state = MLX5_MPW_INL_STATE_OPENED;
- mpw->pkts_n = 0;
- mpw->len = length;
- mpw->total_len = 0;
- mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_TSO);
- mpw->wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ONLY_FIRST_ERR <<
- MLX5_COMP_MODE_OFFSET);
- mpw->wqe->ctrl[3] = 0;
- mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
- mpw->wqe->eseg.inline_hdr_sz = 0;
- mpw->wqe->eseg.cs_flags = 0;
- mpw->wqe->eseg.rsvd0 = 0;
- mpw->wqe->eseg.rsvd1 = 0;
- mpw->wqe->eseg.flow_table_metadata = 0;
- inl = (struct mlx5_wqe_inl_small *)
- (((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
- mpw->data.raw = (uint8_t *)&inl->raw;
-}
-
-/**
- * Close a MPW inline session.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param mpw
- * Pointer to MPW session structure.
- */
-static inline void
-mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
-{
- unsigned int size;
- struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
- (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
-
- size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
- /*
- * Store size in multiple of 16 bytes. Control and Ethernet segments
- * count as 2.
- */
- mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
- MLX5_WQE_DS(size));
- mpw->state = MLX5_MPW_STATE_CLOSED;
- inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
- txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
-}
-
-/**
- * DPDK callback for TX with MPW inline support.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- unsigned int i = 0;
- unsigned int j = 0;
- uint16_t max_elts;
- uint16_t max_wqe;
- unsigned int comp;
- unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
- struct mlx5_mpw mpw = {
- .state = MLX5_MPW_STATE_CLOSED,
- };
- /*
- * Compute the maximum number of WQE which can be consumed by inline
- * code.
- * - 2 DSEG for:
- * - 1 control segment,
- * - 1 Ethernet segment,
- * - N Dseg from the inline request.
- */
- const unsigned int wqe_inl_n =
- ((2 * MLX5_WQE_DWORD_SIZE +
- txq->max_inline * RTE_CACHE_LINE_SIZE) +
- RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
-
- if (unlikely(!pkts_n))
- return 0;
- /* Prefetch first packet cacheline. */
- rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
- rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
- /* Start processing. */
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- do {
- struct rte_mbuf *buf = *(pkts++);
- uintptr_t addr;
- uint32_t length;
- unsigned int segs_n = buf->nb_segs;
- uint8_t cs_flags;
- rte_be32_t metadata;
-
- /*
- * Make sure there is enough room to store this packet and
- * that one ring entry remains unused.
- */
- assert(segs_n);
- if (max_elts < segs_n)
- break;
- /* Do not bother with large packets MPW cannot handle. */
- if (segs_n > MLX5_MPW_DSEG_MAX) {
- txq->stats.oerrors++;
- break;
- }
- max_elts -= segs_n;
- --pkts_n;
- /*
- * Compute max_wqe in case less WQE were consumed in previous
- * iteration.
- */
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- cs_flags = txq_ol_cksum_to_cs(buf);
- /* Copy metadata from mbuf if valid */
- metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
- 0;
- /* Retrieve packet information. */
- length = PKT_LEN(buf);
- /* Start new session if packet differs. */
- if (mpw.state == MLX5_MPW_STATE_OPENED) {
- if ((mpw.len != length) ||
- (segs_n != 1) ||
- (mpw.wqe->eseg.flow_table_metadata != metadata) ||
- (mpw.wqe->eseg.cs_flags != cs_flags))
- mlx5_mpw_close(txq, &mpw);
- } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
- if ((mpw.len != length) ||
- (segs_n != 1) ||
- (length > inline_room) ||
- (mpw.wqe->eseg.flow_table_metadata != metadata) ||
- (mpw.wqe->eseg.cs_flags != cs_flags)) {
- mlx5_mpw_inline_close(txq, &mpw);
- inline_room =
- txq->max_inline * RTE_CACHE_LINE_SIZE;
- }
- }
- if (mpw.state == MLX5_MPW_STATE_CLOSED) {
- if ((segs_n != 1) ||
- (length > inline_room)) {
- /*
- * Multi-Packet WQE consumes at most two WQE.
- * mlx5_mpw_new() expects to be able to use
- * such resources.
- */
- if (unlikely(max_wqe < 2))
- break;
- max_wqe -= 2;
- mlx5_mpw_new(txq, &mpw, length);
- mpw.wqe->eseg.cs_flags = cs_flags;
- mpw.wqe->eseg.flow_table_metadata = metadata;
- } else {
- if (unlikely(max_wqe < wqe_inl_n))
- break;
- max_wqe -= wqe_inl_n;
- mlx5_mpw_inline_new(txq, &mpw, length);
- mpw.wqe->eseg.cs_flags = cs_flags;
- mpw.wqe->eseg.flow_table_metadata = metadata;
- }
- }
- /* Multi-segment packets must be alone in their MPW. */
- assert((segs_n == 1) || (mpw.pkts_n == 0));
- if (mpw.state == MLX5_MPW_STATE_OPENED) {
- assert(inline_room ==
- txq->max_inline * RTE_CACHE_LINE_SIZE);
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
- length = 0;
-#endif
- do {
- volatile struct mlx5_wqe_data_seg *dseg;
-
- assert(buf);
- (*txq->elts)[elts_head++ & elts_m] = buf;
- dseg = mpw.data.dseg[mpw.pkts_n];
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- *dseg = (struct mlx5_wqe_data_seg){
- .byte_count =
- rte_cpu_to_be_32(DATA_LEN(buf)),
- .lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = rte_cpu_to_be_64(addr),
- };
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
- length += DATA_LEN(buf);
-#endif
- buf = buf->next;
- ++mpw.pkts_n;
- ++j;
- } while (--segs_n);
- assert(length == mpw.len);
- if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
- mlx5_mpw_close(txq, &mpw);
- } else {
- unsigned int max;
-
- assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
- assert(length <= inline_room);
- assert(length == DATA_LEN(buf));
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- (*txq->elts)[elts_head++ & elts_m] = buf;
- /* Maximum number of bytes before wrapping. */
- max = ((((uintptr_t)(txq->wqes)) +
- (1 << txq->wqe_n) *
- MLX5_WQE_SIZE) -
- (uintptr_t)mpw.data.raw);
- if (length > max) {
- rte_memcpy((void *)(uintptr_t)mpw.data.raw,
- (void *)addr,
- max);
- mpw.data.raw = (volatile void *)txq->wqes;
- rte_memcpy((void *)(uintptr_t)mpw.data.raw,
- (void *)(addr + max),
- length - max);
- mpw.data.raw += length - max;
- } else {
- rte_memcpy((void *)(uintptr_t)mpw.data.raw,
- (void *)addr,
- length);
-
- if (length == max)
- mpw.data.raw =
- (volatile void *)txq->wqes;
- else
- mpw.data.raw += length;
- }
- ++mpw.pkts_n;
- mpw.total_len += length;
- ++j;
- if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
- mlx5_mpw_inline_close(txq, &mpw);
- inline_room =
- txq->max_inline * RTE_CACHE_LINE_SIZE;
- } else {
- inline_room -= length;
- }
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent bytes counter. */
- txq->stats.obytes += length;
-#endif
- ++i;
- } while (pkts_n);
- /* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
- return 0;
- /* Check whether completion threshold has been reached. */
- /* "j" includes both packets and segments. */
- comp = txq->elts_comp + j;
- if (comp >= MLX5_TX_COMP_THRESH) {
- volatile struct mlx5_wqe *wqe = mpw.wqe;
-
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
- /* Request completion on last WQE. */
- wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ALWAYS <<
- MLX5_COMP_MODE_OFFSET);
- /* Save elts_head in unused "immediate" field of WQE. */
- wqe->ctrl[3] = elts_head;
- txq->elts_comp = 0;
- } else {
- txq->elts_comp = comp;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent packets counter. */
- txq->stats.opackets += i;
-#endif
- /* Ring QP doorbell. */
- if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
- mlx5_mpw_inline_close(txq, &mpw);
- else if (mpw.state == MLX5_MPW_STATE_OPENED)
- mlx5_mpw_close(txq, &mpw);
- mlx5_tx_dbrec(txq, mpw.wqe);
- txq->elts_head = elts_head;
- return i;
-}
-
-/**
- * Open an Enhanced MPW session.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param mpw
- * Pointer to MPW session structure.
- * @param padding
- * Non-zero to pad the first two DWORDs with a zero-length inline header.
- */
-static inline void
-mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
-{
- uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
-
- mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
- mpw->pkts_n = 0;
- mpw->total_len = sizeof(struct mlx5_wqe);
- mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->ctrl[0] =
- rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_ENHANCED_MPSW);
- mpw->wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ONLY_FIRST_ERR <<
- MLX5_COMP_MODE_OFFSET);
- mpw->wqe->ctrl[3] = 0;
- memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
- if (unlikely(padding)) {
- uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
-
- /* Pad the first 2 DWORDs with zero-length inline header. */
- *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
- *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
- rte_cpu_to_be_32(MLX5_INLINE_SEG);
- mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
- /* Start from the next WQEBB. */
- mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
- } else {
- mpw->data.raw = (volatile void *)(mpw->wqe + 1);
- }
-}
-
-/**
- * Close an Enhanced MPW session.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param mpw
- * Pointer to MPW session structure.
- *
- * @return
- * Number of consumed WQEs.
- */
-static inline uint16_t
-mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
-{
- uint16_t ret;
-
- /* Store size in multiple of 16 bytes. Control and Ethernet segments
- * count as 2.
- */
- mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
- MLX5_WQE_DS(mpw->total_len));
- mpw->state = MLX5_MPW_STATE_CLOSED;
- ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
- txq->wqe_ci += ret;
- return ret;
-}
-
-/**
- * TX with Enhanced MPW support.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static inline uint16_t
-txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- unsigned int i = 0;
- unsigned int j = 0;
- uint16_t max_elts;
- uint16_t max_wqe;
- unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
- unsigned int mpw_room = 0;
- unsigned int inl_pad = 0;
- uint32_t inl_hdr;
- uint64_t addr_64;
- struct mlx5_mpw mpw = {
- .state = MLX5_MPW_STATE_CLOSED,
- };
-
- if (unlikely(!pkts_n))
- return 0;
- /* Start processing. */
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- if (unlikely(!max_wqe))
- return 0;
- do {
- struct rte_mbuf *buf = *(pkts++);
- uintptr_t addr;
- unsigned int do_inline = 0; /* Whether inline is possible. */
- uint32_t length;
- uint8_t cs_flags;
- rte_be32_t metadata;
-
- /* Multi-segmented packet is handled in slow-path outside. */
- assert(NB_SEGS(buf) == 1);
- /* Make sure there is enough room to store this packet. */
- if (max_elts - j == 0)
- break;
- cs_flags = txq_ol_cksum_to_cs(buf);
- /* Copy metadata from mbuf if valid */
- metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
- 0;
- /* Retrieve packet information. */
- length = PKT_LEN(buf);
- /* Start new session if:
- * - multi-segment packet
- * - no space left even for a dseg
- * - next packet can be inlined with a new WQE
- * - cs_flag differs
- */
- if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
- if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
- mpw_room) ||
- (length <= txq->inline_max_packet_sz &&
- inl_pad + sizeof(inl_hdr) + length >
- mpw_room) ||
- (mpw.wqe->eseg.flow_table_metadata != metadata) ||
- (mpw.wqe->eseg.cs_flags != cs_flags))
- max_wqe -= mlx5_empw_close(txq, &mpw);
- }
- if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
- /* In Enhanced MPW, inline as much as the budget
- * allows. The remaining space is to be filled with
- * dsegs. If the title WQEBB isn't padded, it will have
- * 2 dsegs there.
- */
- mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
- (max_inline ? max_inline :
- pkts_n * MLX5_WQE_DWORD_SIZE) +
- MLX5_WQE_SIZE);
- if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
- break;
- /* Don't pad the title WQEBB, to avoid wasting WQ space. */
- mlx5_empw_new(txq, &mpw, 0);
- mpw_room -= mpw.total_len;
- inl_pad = 0;
- do_inline = length <= txq->inline_max_packet_sz &&
- sizeof(inl_hdr) + length <= mpw_room &&
- !txq->mpw_hdr_dseg;
- mpw.wqe->eseg.cs_flags = cs_flags;
- mpw.wqe->eseg.flow_table_metadata = metadata;
- } else {
- /* Evaluate whether the next packet can be inlined.
- * Inlining is possible when:
- * - length is less than configured value
- * - length fits for remaining space
- * - not required to fill the title WQEBB with dsegs
- */
- do_inline =
- length <= txq->inline_max_packet_sz &&
- inl_pad + sizeof(inl_hdr) + length <=
- mpw_room &&
- (!txq->mpw_hdr_dseg ||
- mpw.total_len >= MLX5_WQE_SIZE);
- }
- if (max_inline && do_inline) {
- /* Inline packet into WQE. */
- unsigned int max;
-
- assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
- assert(length == DATA_LEN(buf));
- inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- mpw.data.raw = (volatile void *)
- ((uintptr_t)mpw.data.raw + inl_pad);
- max = tx_mlx5_wq_tailroom(txq,
- (void *)(uintptr_t)mpw.data.raw);
- /* Copy inline header. */
- mpw.data.raw = (volatile void *)
- mlx5_copy_to_wq(
- (void *)(uintptr_t)mpw.data.raw,
- &inl_hdr,
- sizeof(inl_hdr),
- (void *)(uintptr_t)txq->wqes,
- max);
- max = tx_mlx5_wq_tailroom(txq,
- (void *)(uintptr_t)mpw.data.raw);
- /* Copy packet data. */
- mpw.data.raw = (volatile void *)
- mlx5_copy_to_wq(
- (void *)(uintptr_t)mpw.data.raw,
- (void *)addr,
- length,
- (void *)(uintptr_t)txq->wqes,
- max);
- ++mpw.pkts_n;
- mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
- /* No need to get completion as the entire packet is
- * copied to WQ. Free the buf right away.
- */
- rte_pktmbuf_free_seg(buf);
- mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
- /* Add pad in the next packet if any. */
- inl_pad = (((uintptr_t)mpw.data.raw +
- (MLX5_WQE_DWORD_SIZE - 1)) &
- ~(MLX5_WQE_DWORD_SIZE - 1)) -
- (uintptr_t)mpw.data.raw;
- } else {
- /* No inline. Load a dseg of packet pointer. */
- volatile rte_v128u32_t *dseg;
-
- assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
- assert((inl_pad + sizeof(*dseg)) <= mpw_room);
- assert(length == DATA_LEN(buf));
- if (!tx_mlx5_wq_tailroom(txq,
- (void *)((uintptr_t)mpw.data.raw
- + inl_pad)))
- dseg = (volatile void *)txq->wqes;
- else
- dseg = (volatile void *)
- ((uintptr_t)mpw.data.raw +
- inl_pad);
- (*txq->elts)[elts_head++ & elts_m] = buf;
- addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
- uintptr_t));
- *dseg = (rte_v128u32_t) {
- rte_cpu_to_be_32(length),
- mlx5_tx_mb2mr(txq, buf),
- addr_64,
- addr_64 >> 32,
- };
- mpw.data.raw = (volatile void *)(dseg + 1);
- mpw.total_len += (inl_pad + sizeof(*dseg));
- ++j;
- ++mpw.pkts_n;
- mpw_room -= (inl_pad + sizeof(*dseg));
- inl_pad = 0;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent bytes counter. */
- txq->stats.obytes += length;
-#endif
- ++i;
- } while (i < pkts_n);
- /* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
- return 0;
- /* Check whether completion threshold has been reached. */
- if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
- (uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
- (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
- volatile struct mlx5_wqe *wqe = mpw.wqe;
-
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
- /* Request completion on last WQE. */
- wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ALWAYS <<
- MLX5_COMP_MODE_OFFSET);
- /* Save elts_head in unused "immediate" field of WQE. */
- wqe->ctrl[3] = elts_head;
- txq->elts_comp = 0;
- txq->mpw_comp = txq->wqe_ci;
- } else {
- txq->elts_comp += j;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent packets counter. */
- txq->stats.opackets += i;
-#endif
- if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
- mlx5_empw_close(txq, &mpw);
- /* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, mpw.wqe);
- txq->elts_head = elts_head;
- return i;
-}
-
-/**
- * DPDK callback for TX with Enhanced MPW support.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- uint16_t nb_tx = 0;
-
- while (pkts_n > nb_tx) {
- uint16_t n;
- uint16_t ret;
-
- n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
- if (n) {
- ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
- if (!ret)
- break;
- nb_tx += ret;
- }
- n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
- if (n) {
- ret = txq_burst_empw(txq, &pkts[nb_tx], n);
- if (!ret)
- break;
- nb_tx += ret;
- }
- }
- return nb_tx;
-}
-
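The removed mlx5_tx_burst_empw() wrapper alternated between two specialized bursts by slicing the packet array into maximal runs of multi-segment and single-segment mbufs. The counting idiom, with an illustrative stand-in for txq_count_contig_multi_seg()/txq_count_contig_single_seg():

```c
/* Editorial sketch: length of the leading run of packets matching a
 * predicate, so each run can be fed to the burst routine that handles
 * it. struct pkt and count_run() are illustrative, not driver types.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pkt {
	bool multi_seg;
};

static uint16_t
count_run(struct pkt *pkts, uint16_t n, bool multi)
{
	uint16_t i;

	for (i = 0; i < n && pkts[i].multi_seg == multi; i++)
		;
	return i;
}

int
main(void)
{
	struct pkt pkts[] = { {true}, {true}, {false}, {false}, {true} };

	/* Runs: 2 multi-seg, then 2 single-seg, then 1 multi-seg. */
	printf("multi run = %u\n", count_run(pkts, 5, true)); /* prints 2 */
	return 0;
}
```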
/**
* Translate RX completion flags to packet type.
*
rte_errno = errno;
return ret;
}
+ } else {
+ struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct ibv_qp_attr mod = {
+ .qp_state = IBV_QPS_RESET,
+ .port_num = (uint8_t)priv->ibv_port,
+ };
+ struct ibv_qp *qp = txq_ctrl->ibv->qp;
+
+ ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
+ "%s\n", strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ mod.qp_state = IBV_QPS_INIT;
+ ret = mlx5_glue->modify_qp(qp, &mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ mod.qp_state = IBV_QPS_RTR;
+ ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ mod.qp_state = IBV_QPS_RTS;
+ ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
}
return 0;
}
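The Tx branch added above walks the QP through the RESET -> INIT -> RTR -> RTS ladder with four unrolled modify_qp() calls; only the INIT step needs IBV_QP_PORT in the attribute mask. Purely as an editorial aside, the same ladder written table-driven (the patch keeps the unrolled form):

```c
/* Editorial sketch only; not part of the patch. */
static int
qp_to_rts(struct ibv_qp *qp, uint8_t port)
{
	static const enum ibv_qp_state states[] = {
		IBV_QPS_RESET, IBV_QPS_INIT, IBV_QPS_RTR, IBV_QPS_RTS,
	};
	unsigned int i;

	for (i = 0; i < RTE_DIM(states); i++) {
		struct ibv_qp_attr mod = {
			.qp_state = states[i],
			.port_num = port,
		};
		/* The port is only required for the INIT transition. */
		int mask = IBV_QP_STATE |
			   (states[i] == IBV_QPS_INIT ? IBV_QP_PORT : 0);
		int ret = mlx5_glue->modify_qp(qp, &mod, mask);

		if (ret)
			return ret;
	}
	return 0;
}
```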
* (e.g. mlx5_rxtx_vec_sse.c for x86).
*/
-__rte_weak uint16_t
-mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- return 0;
-}
-
-__rte_weak uint16_t
-mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- return 0;
-}
-
__rte_weak uint16_t
mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
}
__rte_weak int
-mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
return -ENOTSUP;
}
__rte_weak int
-mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
-__rte_weak int
-mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
+/**
+ * DPDK callback to check the status of a tx descriptor.
+ *
+ * @param tx_queue
+ * The tx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the tx descriptor.
+ */
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
- return -ENOTSUP;
+ (void)tx_queue;
+ (void)offset;
+ return RTE_ETH_TX_DESC_FULL;
}
-__rte_weak int
-mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
+/**
+ * Configure the TX function to use.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * Pointer to selected Tx burst function.
+ */
+eth_tx_burst_t
+mlx5_select_tx_function(struct rte_eth_dev *dev)
{
- return -ENOTSUP;
+ (void)dev;
+ return removed_tx_burst;
}
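mlx5_select_tx_function() now returns removed_tx_burst(), which this section does not define; presumably it is a stub of roughly the following shape elsewhere in the series. The rte_errno value is an assumption, not something this hunk confirms.

```c
/* Assumed shape of the stub returned above; editorial sketch. */
uint16_t
removed_tx_burst(void *dpdk_txq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
{
	rte_errno = ENOTSUP; /* assumption: reject Tx until new datapath lands */
	return 0;
}
```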