/**
 * Copy data into a work queue, wrapping at the end of the ring.
 *
 * @param dst
 *   Destination inside the ring.
 * @param src
 *   Source buffer.
 * @param n
 *   Number of bytes to copy.
 * @param base
 *   Base address of the ring (wrap target).
 * @param tailroom
 *   Bytes available between dst and the end of the ring.
 *
 * @return
 *   Pointer one past the last byte written.
 */
static inline void *
mlx5_copy_to_wq(void *dst, const void *src, size_t n,
		void *base, size_t tailroom)
{
	/* Fast path: everything fits before the end of the ring. */
	if (n < tailroom) {
		rte_memcpy(dst, src, n);
		return (uint8_t *)dst + n;
	}
	/*
	 * Fill the tail of the ring, then continue from its base.
	 * When n == tailroom the second copy is zero-length and the
	 * returned pointer is exactly base.
	 */
	rte_memcpy(dst, src, tailroom);
	rte_memcpy(base, (const uint8_t *)src + tailroom, n - tailroom);
	return (uint8_t *)base + (n - tailroom);
}
-
-/**
- * DPDK callback to check the status of a tx descriptor.
- *
- * @param tx_queue
- * The tx queue.
- * @param[in] offset
- * The index of the descriptor in the ring.
- *
- * @return
- * The status of the tx descriptor.
- */
-int
-mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
-{
- struct mlx5_txq_data *txq = tx_queue;
- uint16_t used;
-
- mlx5_tx_complete(txq);
- used = txq->elts_head - txq->elts_tail;
- if (offset < used)
- return RTE_ETH_TX_DESC_FULL;
- return RTE_ETH_TX_DESC_DONE;
-}
-
/**
 * DPDK callback to check the status of a rx descriptor.
 *
 * @param rx_queue
 *   The rx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the rx descriptor.
 */
int
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	struct rxq_zip *zip = &rxq->zip;
	volatile struct mlx5_cqe *cqe;
	const unsigned int cqe_n = (1 << rxq->cqe_n);
	const unsigned int cqe_cnt = cqe_n - 1;
	unsigned int cq_ci;
	unsigned int used;

	/* if we are processing a compressed cqe */
	if (zip->ai) {
		/*
		 * Entries remaining in the current compressed session
		 * are already known to be valid; start counting from
		 * them and resume the walk past the session.
		 */
		used = zip->cqe_cnt - zip->ca;
		cq_ci = zip->cq_ci;
	} else {
		used = 0;
		cq_ci = rxq->cq_ci;
	}
	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	/* Walk the CQ until the first CQE not yet owned by software. */
	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
		int8_t op_own;
		unsigned int n;

		op_own = cqe->op_own;
		/* A compressed CQE accounts for byte_cnt packets at once. */
		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
			n = rte_be_to_cpu_32(cqe->byte_cnt);
		else
			n = 1;
		cq_ci += n;
		used += n;
		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	}
	/* Never report more descriptors than the Rx ring can hold. */
	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
	if (offset < used)
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
}
-
/**
 * DPDK callback for TX.
 *
 * Builds one WQE per packet (SEND or TSO opcode), inlining packet data
 * into the WQE when the queue configuration allows it, and chaining
 * extra mbuf segments as data segments.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	unsigned int i = 0; /* Packets posted. */
	unsigned int j = 0; /* Additional segments posted. */
	unsigned int k = 0; /* NOP WQEs posted (TSO corner case). */
	uint16_t max_elts;
	uint16_t max_wqe;
	unsigned int comp;
	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
	unsigned int segs_n = 0;
	const unsigned int max_inline = txq->max_inline;

	if (unlikely(!pkts_n))
		return 0;
	/* Prefetch first packet cacheline. */
	rte_prefetch0(*pkts);
	/* Start processing. */
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	/* A CQE slot must always be available. */
	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	if (unlikely(!max_wqe))
		return 0;
	do {
		struct rte_mbuf *buf = NULL;
		uint8_t *raw;
		volatile struct mlx5_wqe_v *wqe = NULL;
		volatile rte_v128u32_t *dseg = NULL;
		uint32_t length;
		unsigned int ds = 0;
		unsigned int sg = 0; /* counter of additional segs attached. */
		uintptr_t addr;
		/* Ethernet header (2 bytes) + first inlined DWORD. */
		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
		uint16_t tso_header_sz = 0;
		uint16_t ehdr;
		uint8_t cs_flags;
		uint64_t tso = 0;
		uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
		uint32_t total_length = 0;
#endif

		/* first_seg */
		buf = *pkts;
		segs_n = buf->nb_segs;
		/*
		 * Make sure there is enough room to store this packet and
		 * that one ring entry remains unused.
		 */
		assert(segs_n);
		if (max_elts < segs_n)
			break;
		max_elts -= segs_n;
		sg = --segs_n;
		if (unlikely(--max_wqe == 0))
			break;
		wqe = (volatile struct mlx5_wqe_v *)
			tx_mlx5_wqe(txq, txq->wqe_ci);
		rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
		if (pkts_n - i > 1)
			rte_prefetch0(*(pkts + 1));
		addr = rte_pktmbuf_mtod(buf, uintptr_t);
		length = DATA_LEN(buf);
		/* First two bytes (Ethernet type area) kept aside for the
		 * ESEG inline header. */
		ehdr = (((uint8_t *)addr)[1] << 8) |
		       ((uint8_t *)addr)[0];
#ifdef MLX5_PMD_SOFT_COUNTERS
		total_length = length;
#endif
		/* The first segment must at least cover the inline header. */
		if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
			txq->stats.oerrors++;
			break;
		}
		/* Update element. */
		(*txq->elts)[elts_head & elts_m] = buf;
		/* Prefetch next buffer data. */
		if (pkts_n - i > 1)
			rte_prefetch0(
			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
		cs_flags = txq_ol_cksum_to_cs(txq, buf);
		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
		/* Replace the Ethernet type by the VLAN if necessary. */
		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
							 buf->vlan_tci);
			unsigned int len = 2 * ETHER_ADDR_LEN - 2;

			addr += 2;
			length -= 2;
			/* Copy Destination and source mac address. */
			memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
			/* Copy VLAN. */
			memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
			/* Copy missing two bytes to end the DSeg. */
			memcpy((uint8_t *)raw + len + sizeof(vlan),
			       ((uint8_t *)addr) + len, 2);
			addr += len + 2;
			length -= (len + 2);
		} else {
			memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
			       MLX5_WQE_DWORD_SIZE);
			length -= pkt_inline_sz;
			addr += pkt_inline_sz;
		}
		raw += MLX5_WQE_DWORD_SIZE;
		tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
		if (tso) {
			/* End of the WQE ring: inline copies must not
			 * cross it. */
			uintptr_t end =
				(uintptr_t)(((uintptr_t)txq->wqes) +
					    (1 << txq->wqe_n) * MLX5_WQE_SIZE);
			unsigned int copy_b;
			uint8_t vlan_sz =
				(buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
			const uint64_t is_tunneled =
				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
						 PKT_TX_TUNNEL_VXLAN);

			tso_header_sz = buf->l2_len + vlan_sz +
					buf->l3_len + buf->l4_len;
			tso_segsz = buf->tso_segsz;
			if (unlikely(tso_segsz == 0)) {
				txq->stats.oerrors++;
				break;
			}
			if (is_tunneled && txq->tunnel_en) {
				tso_header_sz += buf->outer_l2_len +
						 buf->outer_l3_len;
				cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
			} else {
				cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			}
			if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
				txq->stats.oerrors++;
				break;
			}
			copy_b = tso_header_sz - pkt_inline_sz;
			/* First seg must contain all headers. */
			assert(copy_b <= length);
			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
				/* WQEs consumed by the inlined header. */
				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;

				if (unlikely(max_wqe < n))
					break;
				max_wqe -= n;
				rte_memcpy((void *)raw, (void *)addr, copy_b);
				addr += copy_b;
				length -= copy_b;
				/* Include padding for TSO header. */
				copy_b = MLX5_WQE_DS(copy_b) *
					 MLX5_WQE_DWORD_SIZE;
				pkt_inline_sz += copy_b;
				raw += copy_b;
			} else {
				/*
				 * Header would wrap the ring: post a NOP WQE
				 * instead and retry this packet from a fresh
				 * WQE on the next iteration.
				 */
				/* NOP WQE. */
				wqe->ctrl = (rte_v128u32_t){
					rte_cpu_to_be_32(txq->wqe_ci << 8),
					rte_cpu_to_be_32(txq->qp_num_8s | 1),
					0,
					0,
				};
				ds = 1;
#ifdef MLX5_PMD_SOFT_COUNTERS
				total_length = 0;
#endif
				k++;
				goto next_wqe;
			}
		}
		/* Inline if enough room. */
		if (max_inline || tso) {
			uint32_t inl = 0;
			uintptr_t end = (uintptr_t)
				(((uintptr_t)txq->wqes) +
				 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
			unsigned int inline_room = max_inline *
						   RTE_CACHE_LINE_SIZE -
						   (pkt_inline_sz - 2) -
						   !!tso * sizeof(inl);
			uintptr_t addr_end;
			unsigned int copy_b;

pkt_inline:
			/* Inline whole cache lines only. */
			addr_end = RTE_ALIGN_FLOOR(addr + inline_room,
						   RTE_CACHE_LINE_SIZE);
			copy_b = (addr_end > addr) ?
				 RTE_MIN((addr_end - addr), length) : 0;
			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
				/*
				 * One Dseg remains in the current WQE. To
				 * keep the computation positive, it is
				 * removed after the bytes to Dseg conversion.
				 */
				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;

				if (unlikely(max_wqe < n))
					break;
				max_wqe -= n;
				if (tso && !inl) {
					/* TSO payload inline marker, written
					 * once before the first inlined
					 * payload byte. */
					inl = rte_cpu_to_be_32(copy_b |
							       MLX5_INLINE_SEG);
					rte_memcpy((void *)raw,
						   (void *)&inl, sizeof(inl));
					raw += sizeof(inl);
					pkt_inline_sz += sizeof(inl);
				}
				rte_memcpy((void *)raw, (void *)addr, copy_b);
				addr += copy_b;
				length -= copy_b;
				pkt_inline_sz += copy_b;
			}
			/*
			 * 2 DWORDs consumed by the WQE header + ETH segment +
			 * the size of the inline part of the packet.
			 */
			ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
			if (length > 0) {
				if (ds % (MLX5_WQE_SIZE /
					  MLX5_WQE_DWORD_SIZE) == 0) {
					if (unlikely(--max_wqe == 0))
						break;
					dseg = (volatile rte_v128u32_t *)
					       tx_mlx5_wqe(txq, txq->wqe_ci +
							   ds / 4);
				} else {
					dseg = (volatile rte_v128u32_t *)
						((uintptr_t)wqe +
						 (ds * MLX5_WQE_DWORD_SIZE));
				}
				goto use_dseg;
			} else if (!segs_n) {
				goto next_pkt;
			} else {
				/* Current segment fully inlined: move on to
				 * the next mbuf segment and keep inlining. */
				raw += copy_b;
				inline_room -= copy_b;
				--segs_n;
				buf = buf->next;
				assert(buf);
				addr = rte_pktmbuf_mtod(buf, uintptr_t);
				length = DATA_LEN(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
				total_length += length;
#endif
				(*txq->elts)[++elts_head & elts_m] = buf;
				goto pkt_inline;
			}
		} else {
			/*
			 * No inline has been done in the packet, only the
			 * Ethernet Header as been stored.
			 */
			dseg = (volatile rte_v128u32_t *)
				((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
			ds = 3;
use_dseg:
			/* Add the remaining packet as a simple ds. */
			addr = rte_cpu_to_be_64(addr);
			*dseg = (rte_v128u32_t){
				rte_cpu_to_be_32(length),
				mlx5_tx_mb2mr(txq, buf),
				addr,
				addr >> 32,
			};
			++ds;
			if (!segs_n)
				goto next_pkt;
		}
next_seg:
		assert(buf);
		assert(ds);
		assert(wqe);
		/*
		 * Spill on next WQE when the current one does not have
		 * enough room left. Size of WQE must a be a multiple
		 * of data segment size.
		 */
		assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
		if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
			if (unlikely(--max_wqe == 0))
				break;
			dseg = (volatile rte_v128u32_t *)
			       tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
			rte_prefetch0(tx_mlx5_wqe(txq,
						  txq->wqe_ci + ds / 4 + 1));
		} else {
			++dseg;
		}
		++ds;
		buf = buf->next;
		assert(buf);
		length = DATA_LEN(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
		total_length += length;
#endif
		/* Store segment information. */
		addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
		*dseg = (rte_v128u32_t){
			rte_cpu_to_be_32(length),
			mlx5_tx_mb2mr(txq, buf),
			addr,
			addr >> 32,
		};
		(*txq->elts)[++elts_head & elts_m] = buf;
		if (--segs_n)
			goto next_seg;
next_pkt:
		/* The device limits the number of DSegs per WQE. */
		if (ds > MLX5_DSEG_MAX) {
			txq->stats.oerrors++;
			break;
		}
		++elts_head;
		++pkts;
		++i;
		j += sg;
		/* Initialize known and common part of the WQE structure. */
		if (tso) {
			wqe->ctrl = (rte_v128u32_t){
				rte_cpu_to_be_32((txq->wqe_ci << 8) |
						 MLX5_OPCODE_TSO),
				rte_cpu_to_be_32(txq->qp_num_8s | ds),
				0,
				0,
			};
			wqe->eseg = (rte_v128u32_t){
				0,
				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
				0,
				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
			};
		} else {
			wqe->ctrl = (rte_v128u32_t){
				rte_cpu_to_be_32((txq->wqe_ci << 8) |
						 MLX5_OPCODE_SEND),
				rte_cpu_to_be_32(txq->qp_num_8s | ds),
				0,
				0,
			};
			wqe->eseg = (rte_v128u32_t){
				0,
				cs_flags,
				0,
				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
			};
		}
next_wqe:
		txq->wqe_ci += (ds + 3) / 4;
		/* Save the last successful WQE for completion request */
		last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += total_length;
#endif
	} while (i < pkts_n);
	/* Take a shortcut if nothing must be sent. */
	if (unlikely((i + k) == 0))
		return 0;
	txq->elts_head += (i + j);
	/* Check whether completion threshold has been reached. */
	comp = txq->elts_comp + i + j + k;
	if (comp >= MLX5_TX_COMP_THRESH) {
		/* Request completion on last WQE. */
		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
		/* Save elts_head in unused "immediate" field of WQE. */
		last_wqe->ctrl3 = txq->elts_head;
		txq->elts_comp = 0;
#ifndef NDEBUG
		++txq->cq_pi;
#endif
	} else {
		txq->elts_comp = comp;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell. */
	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
	return i;
}
-
/**
 * Open a MPW session.
 *
 * A MPW (multi-packet WQE) session batches several same-length packets
 * into a single WQE. This sets up the control/Ethernet segments and the
 * five data segment slots (two in the current WQE, three in the next).
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param mpw
 *   Pointer to MPW session structure.
 * @param length
 *   Packet length.
 */
static inline void
mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
{
	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
	/* Extra data segments spill into the following WQE slot. */
	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
		(volatile struct mlx5_wqe_data_seg (*)[])
		tx_mlx5_wqe(txq, idx + 1);

	mpw->state = MLX5_MPW_STATE_OPENED;
	mpw->pkts_n = 0;
	mpw->len = length;
	mpw->total_len = 0;
	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
	/* All packets in a MPW session share the same length (mss field). */
	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
	mpw->wqe->eseg.inline_hdr_sz = 0;
	mpw->wqe->eseg.rsvd0 = 0;
	mpw->wqe->eseg.rsvd1 = 0;
	mpw->wqe->eseg.rsvd2 = 0;
	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
					     (txq->wqe_ci << 8) |
					     MLX5_OPCODE_TSO);
	mpw->wqe->ctrl[2] = 0;
	mpw->wqe->ctrl[3] = 0;
	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
	mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
		(((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
	mpw->data.dseg[2] = &(*dseg)[0];
	mpw->data.dseg[3] = &(*dseg)[1];
	mpw->data.dseg[4] = &(*dseg)[2];
}
-
-/**
- * Close a MPW session.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param mpw
- * Pointer to MPW session structure.
- */
-static inline void
-mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)