op_own = cqe->op_own;
if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
- n = ntohl(cqe->byte_cnt);
+ n = rte_be_to_cpu_32(cqe->byte_cnt);
else
n = 1;
cq_ci += n;
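
The hunks above and below replace the libc byte-order helpers (ntohl()/htonl() and friends) with the DPDK equivalents from <rte_byteorder.h>. The two families are interchangeable at runtime; the DPDK macros keep the data path free of OS networking headers and are constant-folded by the compiler, becoming a plain load on big-endian CPUs and a single byte swap on little-endian ones. A minimal sketch of the read-side pattern, assuming only the DPDK header:

#include <stdint.h>
#include <rte_byteorder.h>

/* Decode a 32-bit big-endian device field such as a CQE byte count.
 * On big-endian CPUs this is a plain load; on little-endian CPUs it
 * compiles to one byte-swap instruction. */
static inline uint32_t
decode_be32(const volatile uint32_t *field)
{
	return rte_be_to_cpu_32(*field);
}
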
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
- uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+ uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+ buf->vlan_tci);
unsigned int len = 2 * ETHER_ADDR_LEN - 2;
addr += 2;
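
The VLAN branch builds the whole 802.1Q tag in host order, TPID 0x8100 in the upper 16 bits and the TCI in the lower 16, then converts it to wire order in one step, as the replacement lines show. A sketch of that computation (the helper name is illustrative, not the driver's):

#include <stdint.h>
#include <rte_byteorder.h>

/* 4-byte 802.1Q tag to splice into the inlined Ethernet header:
 * TPID 0x8100 in the top half, PCP/DEI/VLAN-ID (the TCI) in the
 * bottom half, swapped once to network byte order. */
static inline uint32_t
vlan_tag_be(uint16_t vlan_tci)
{
	return rte_cpu_to_be_32(0x81000000 | vlan_tci);
}
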
} else {
/* NOP WQE. */
wqe->ctrl = (rte_v128u32_t){
- htonl(txq->wqe_ci << 8),
- htonl(txq->qp_num_8s | 1),
+ rte_cpu_to_be_32(
+ txq->wqe_ci << 8),
+ rte_cpu_to_be_32(
+ txq->qp_num_8s | 1),
0,
0,
};
break;
max_wqe -= n;
if (tso) {
- inl = htonl(copy_b | MLX5_INLINE_SEG);
+ uint32_t inl =
+ rte_cpu_to_be_32(copy_b |
+ MLX5_INLINE_SEG);
+
+ pkt_inline_sz =
+ MLX5_WQE_DS(tso_header_sz) *
+ MLX5_WQE_DWORD_SIZE;
+
rte_memcpy((void *)raw,
(void *)&inl, sizeof(inl));
raw += sizeof(inl);
ds = 3;
use_dseg:
/* Add the remaining packet as a simple ds. */
- naddr = htonll(addr);
+ naddr = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t){
- htonl(length),
+ rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
total_length += length;
#endif
/* Store segment information. */
- naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+ naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
- htonl(length),
+ rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
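
Every scatter entry written into a WQE in these hunks follows the same recipe: a 32-bit byte count, a 32-bit memory key (already big-endian, since mlx5_tx_mb2mr() returns it in wire order), and a 64-bit buffer address. A hedged restatement of that pattern, with the segment layout inferred from the initializers above rather than copied from the driver's headers:

#include <stdint.h>
#include <rte_byteorder.h>

/* Data-segment layout implied by the driver's mlx5_wqe_data_seg;
 * every field is stored big-endian because the HCA reads it raw.
 * lkey_be is assumed to arrive already swapped, as mlx5_tx_mb2mr()
 * provides it in the hunks above. */
struct dseg_view {
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

static inline void
fill_dseg(volatile struct dseg_view *dseg, uint32_t len,
	  uint32_t lkey_be, uintptr_t buf)
{
	dseg->byte_count = rte_cpu_to_be_32(len);
	dseg->lkey = lkey_be;
	dseg->addr = rte_cpu_to_be_64((uint64_t)buf);
}
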
/* Initialize known and common part of the WQE structure. */
if (tso) {
wqe->ctrl = (rte_v128u32_t){
- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
- htonl(txq->qp_num_8s | ds),
+ rte_cpu_to_be_32((txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO),
+ rte_cpu_to_be_32(txq->qp_num_8s | ds),
0,
0,
};
wqe->eseg = (rte_v128u32_t){
0,
- cs_flags | (htons(tso_segsz) << 16),
+ cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
0,
- (ehdr << 16) | htons(tso_header_sz),
+ (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
};
} else {
wqe->ctrl = (rte_v128u32_t){
- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
- htonl(txq->qp_num_8s | ds),
+ rte_cpu_to_be_32((txq->wqe_ci << 8) |
+ MLX5_OPCODE_SEND),
+ rte_cpu_to_be_32(txq->qp_num_8s | ds),
0,
0,
};
0,
cs_flags,
0,
- (ehdr << 16) | htons(pkt_inline_sz),
+ (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
};
}
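
Both arms of the if/else build the control segment the same way: dword 0 packs an optional opcode modifier, the WQE index, and the opcode; dword 1 packs the QP number (pre-shifted by 8 in qp_num_8s) with the WQE size in 16-byte units; dwords 2 and 3 stay zero until a completion is requested. A sketch with illustrative names:

#include <stdint.h>
#include <rte_byteorder.h>

/* First two dwords of an mlx5 control segment as the hunks above
 * assemble them. qp_num_8s already carries the QP number << 8. */
static inline void
fill_ctrl(volatile uint32_t ctrl[4], uint8_t opc_mod, uint16_t wqe_ci,
	  uint8_t opcode, uint32_t qp_num_8s, uint8_t ds)
{
	ctrl[0] = rte_cpu_to_be_32(((uint32_t)opc_mod << 24) |
				   ((uint32_t)wqe_ci << 8) | opcode);
	ctrl[1] = rte_cpu_to_be_32(qp_num_8s | ds);
	ctrl[2] = 0;
	ctrl[3] = 0;
}
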
next_wqe:
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
/* Request completion on last WQE. */
- last_wqe->ctrl2 = htonl(8);
+ last_wqe->ctrl2 = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
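
Requesting a completion is a one-word write: the literal 8 stored into ctrl dword 2 lands in the fm_ce_se byte and selects the "always generate a CQE" completion mode (this appears to match MLX5_WQE_CTRL_CQ_UPDATE in later DPDK releases; the patch itself uses the raw value). Restated:

#include <stdint.h>
#include <rte_byteorder.h>

/* Ask the HCA to raise a CQE for this WQE; 8 is assumed to be the
 * CQ-update completion mode in the fm_ce_se byte of ctrl dword 2. */
static inline void
request_completion(volatile uint32_t *ctrl2)
{
	*ctrl2 = rte_cpu_to_be_32(8);
}
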
mpw->len = length;
mpw->total_len = 0;
mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->eseg.mss = htons(length);
+ mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.rsvd0 = 0;
mpw->wqe->eseg.rsvd1 = 0;
mpw->wqe->eseg.rsvd2 = 0;
- mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO);
mpw->wqe->ctrl[2] = 0;
mpw->wqe->ctrl[3] = 0;
mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
mpw->state = MLX5_MPW_STATE_CLOSED;
if (num < 3)
++txq->wqe_ci;
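
The "2 + num" in ctrl[1] above, like the MLX5_WQE_DS() calls elsewhere, counts the WQE in 16-byte units: the control and Ethernet segments together occupy two units, and each data segment one more. A sketch of the rounding, assuming MLX5_WQE_DWORD_SIZE is the driver's 16-byte unit:

#include <stdint.h>

#define WQE_DWORD_SIZE 16 /* stand-in for MLX5_WQE_DWORD_SIZE */

/* Round a byte size up to 16-byte units, as MLX5_WQE_DS() does. */
static inline uint32_t
wqe_ds(uint32_t size)
{
	return (size + WQE_DWORD_SIZE - 1) / WQE_DWORD_SIZE;
}
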
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
- .byte_count = htonl(DATA_LEN(buf)),
+ .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
.lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = htonll(addr),
+ .addr = rte_cpu_to_be_64(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
mpw->len = length;
mpw->total_len = 0;
mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO);
mpw->wqe->ctrl[2] = 0;
mpw->wqe->ctrl[3] = 0;
- mpw->wqe->eseg.mss = htons(length);
+ mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.cs_flags = 0;
mpw->wqe->eseg.rsvd0 = 0;
* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+ MLX5_WQE_DS(size));
mpw->state = MLX5_MPW_STATE_CLOSED;
- inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+ inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
}
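
An inline segment announces itself through bit 31 of its length word: byte_cnt carries MLX5_INLINE_SEG or'ed with the number of inlined bytes, so the HCA can tell inlined payload from a pointer segment. A sketch, with the flag value restated as an assumption (bit 31, i.e. 0x80000000):

#include <stdint.h>
#include <rte_byteorder.h>

#define INLINE_SEG 0x80000000u /* stand-in for MLX5_INLINE_SEG */

/* Length word of an inline data segment: inline flag plus byte
 * count, converted once to wire order. */
static inline uint32_t
inline_byte_cnt(uint32_t total_len)
{
	return rte_cpu_to_be_32(total_len | INLINE_SEG);
}
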
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
- .byte_count = htonl(DATA_LEN(buf)),
+ .byte_count =
+ rte_cpu_to_be_32(DATA_LEN(buf)),
.lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = htonll(addr),
+ .addr = rte_cpu_to_be_64(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
mpw->pkts_n = 0;
mpw->total_len = sizeof(struct mlx5_wqe);
mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_ENHANCED_MPSW);
+ mpw->wqe->ctrl[0] =
+ rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_ENHANCED_MPSW);
mpw->wqe->ctrl[2] = 0;
mpw->wqe->ctrl[3] = 0;
memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
/* Pad the first 2 DWORDs with zero-length inline header. */
- *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+ *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
- htonl(MLX5_INLINE_SEG);
+ rte_cpu_to_be_32(MLX5_INLINE_SEG);
mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
/* Start from the next WQEBB. */
mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
/* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+ MLX5_WQE_DS(mpw->total_len));
mpw->state = MLX5_MPW_STATE_CLOSED;
ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
txq->wqe_ci += ret;
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
- .byte_count = htonl(DATA_LEN(buf)),
+ .byte_count = rte_cpu_to_be_32(
+ DATA_LEN(buf)),
.lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = htonll(addr),
+ .addr = rte_cpu_to_be_64(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
assert(length == DATA_LEN(buf));
- inl_hdr = htonl(length | MLX5_INLINE_SEG);
+ inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
addr = rte_pktmbuf_mtod(buf, uintptr_t);
mpw.data.raw = (volatile void *)
((uintptr_t)mpw.data.raw + inl_pad);
for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
rte_prefetch2((void *)(addr +
n * RTE_CACHE_LINE_SIZE));
- naddr = htonll(addr);
+ naddr = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t) {
- htonl(length),
+ rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
(volatile struct mlx5_mini_cqe8 (*)[8])
(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
- len = ntohl((*mc)[zip->ai & 7].byte_cnt);
- *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
+ len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+ *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
if ((++zip->ai & 7) == 0) {
/* Invalidate consumed CQEs */
idx = zip->ca;
cqe_cnt].pkt_info);
/* Fix endianness. */
- zip->cqe_cnt = ntohl(cqe->byte_cnt);
+ zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
/*
* Current mini array position is the one returned by
* check_cqe64().
--rxq->cq_ci;
zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
/* Get packet size to return. */
- len = ntohl((*mc)[0].byte_cnt);
- *rss_hash = ntohl((*mc)[0].rx_hash_result);
+ len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+ *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
zip->ai = 1;
/* Prefetch all the entries to be invalidated */
idx = zip->ca;
++idx;
}
} else {
- len = ntohl(cqe->byte_cnt);
- *rss_hash = ntohl(cqe->rx_hash_res);
+ len = rte_be_to_cpu_32(cqe->byte_cnt);
+ *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
}
/* Error while receiving packet. */
if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
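
On the receive side the conversion direction flips: CQE fields arrive big-endian from the device and are swapped to host order before any arithmetic or comparison, whether they come from a compressed mini-CQE array or a regular CQE. A restated sketch (the field names mirror the driver's mlx5_cqe but the struct here is illustrative):

#include <stdint.h>
#include <rte_byteorder.h>

/* Big-endian CQE fields as the RX hunks consume them. */
struct cqe_view {
	uint32_t byte_cnt;    /* packet length */
	uint32_t rx_hash_res; /* RSS hash result */
};

static inline void
parse_cqe(const volatile struct cqe_view *cqe, uint32_t *len,
	  uint32_t *rss_hash)
{
	*len = rte_be_to_cpu_32(cqe->byte_cnt);
	*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
}
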
rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
{
uint32_t ol_flags = 0;
- uint16_t flags = ntohs(cqe->hdr_type_etc);
+ uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
ol_flags =
TRANSPOSE(flags,
MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
pkt->ol_flags |= PKT_RX_FDIR;
if (cqe->sop_drop_qpn !=
- htonl(MLX5_FLOW_MARK_DEFAULT)) {
+ rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
uint32_t mark = cqe->sop_drop_qpn;
pkt->ol_flags |= PKT_RX_FDIR_ID;
pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
if (rxq->vlan_strip &&
(cqe->hdr_type_etc &
- htons(MLX5_CQE_VLAN_STRIPPED))) {
+ rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
pkt->ol_flags |= PKT_RX_VLAN_PKT |
PKT_RX_VLAN_STRIPPED;
- pkt->vlan_tci = ntohs(cqe->vlan_info);
+ pkt->vlan_tci =
+ rte_be_to_cpu_16(cqe->vlan_info);
}
if (rxq->crc_present)
len -= ETHER_CRC_LEN;
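
Note which side of each comparison gets converted in these hunks: for per-packet flag tests the constant is swapped, not the field. rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED) folds at compile time, so testing the big-endian hdr_type_etc costs no runtime byte swap; only values that feed further computation, like vlan_tci, are converted to host order. A sketch with a stand-in flag value:

#include <stdint.h>
#include <rte_byteorder.h>

#define CQE_VLAN_STRIPPED 0x1 /* stand-in for MLX5_CQE_VLAN_STRIPPED */

/* Test a flag inside a big-endian field by swapping the constant:
 * the conversion of a literal is folded by the compiler. */
static inline int
vlan_was_stripped(uint16_t hdr_type_etc_be)
{
	return (hdr_type_etc_be &
		rte_cpu_to_be_16(CQE_VLAN_STRIPPED)) != 0;
}
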
* of the buffers are already known, only the buffer address
* changes.
*/
- wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+ wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
if (len > DATA_LEN(seg)) {
len -= DATA_LEN(seg);
++NB_SEGS(pkt);
/* Update the consumer index. */
rxq->rq_ci = rq_ci >> sges_n;
rte_wmb();
- *rxq->cq_db = htonl(rxq->cq_ci);
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
rte_wmb();
- *rxq->rq_db = htonl(rxq->rq_ci);
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment packets counter. */
rxq->stats.ipackets += i;
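
The final hunk publishes the new consumer indexes through the doorbell records. Each record is a 32-bit big-endian counter, and the rte_wmb() calls keep the device from observing a doorbell ahead of the ring entries it covers. The pattern, restated as a self-contained sketch:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>

/* Doorbell update as in the hunk above: write barriers order the
 * descriptor stores ahead of the big-endian doorbell values. */
static inline void
ring_doorbells(volatile uint32_t *cq_db, uint32_t cq_ci,
	       volatile uint32_t *rq_db, uint32_t rq_ci)
{
	rte_wmb();
	*cq_db = rte_cpu_to_be_32(cq_ci);
	rte_wmb();
	*rq_db = rte_cpu_to_be_32(rq_ci);
}
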