The Tx descriptor for TSO embeds the packet header to be replicated. If
Tx inline is enabled, additional packet data may also be inlined,
preceded by a 4B inline header. Between the TSO header and that
additional inlined packet data there may be padding to align the inline
part to MLX5_WQE_DWORD_SIZE. When calculating the total size of the
inlined data, the size of the inline header and this padding was missing.
Fixes: 3f13f8c23a7c ("net/mlx5: support hardware TSO")
Cc: stable@dpdk.org
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
length -= pkt_inline_sz;
addr += pkt_inline_sz;
}
length -= pkt_inline_sz;
addr += pkt_inline_sz;
}
+ raw += MLX5_WQE_DWORD_SIZE;
if (txq->tso_en) {
tso = buf->ol_flags & PKT_TX_TCP_SEG;
if (tso) {
if (txq->tso_en) {
tso = buf->ol_flags & PKT_TX_TCP_SEG;
if (tso) {
copy_b = tso_header_sz - pkt_inline_sz;
/* First seg must contain all headers. */
assert(copy_b <= length);
copy_b = tso_header_sz - pkt_inline_sz;
/* First seg must contain all headers. */
assert(copy_b <= length);
- raw += MLX5_WQE_DWORD_SIZE;
if (copy_b &&
((end - (uintptr_t)raw) > copy_b)) {
uint16_t n = (MLX5_WQE_DS(copy_b) -
if (copy_b &&
((end - (uintptr_t)raw) > copy_b)) {
uint16_t n = (MLX5_WQE_DS(copy_b) -
(void *)addr, copy_b);
addr += copy_b;
length -= copy_b;
(void *)addr, copy_b);
addr += copy_b;
length -= copy_b;
+ /* Include padding for TSO header. */
+ copy_b = MLX5_WQE_DS(copy_b) *
+ MLX5_WQE_DWORD_SIZE;
- /*
- * Another DWORD will be added
- * in the inline part.
- */
- raw += MLX5_WQE_DS(copy_b) *
- MLX5_WQE_DWORD_SIZE -
- MLX5_WQE_DWORD_SIZE;
} else {
/* NOP WQE. */
wqe->ctrl = (rte_v128u32_t){
} else {
/* NOP WQE. */
wqe->ctrl = (rte_v128u32_t){
}
/* Inline if enough room. */
if (inline_en || tso) {
}
/* Inline if enough room. */
if (inline_en || tso) {
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
(1 << txq->wqe_n) * MLX5_WQE_SIZE);
unsigned int inline_room = max_inline *
RTE_CACHE_LINE_SIZE -
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
(1 << txq->wqe_n) * MLX5_WQE_SIZE);
unsigned int inline_room = max_inline *
RTE_CACHE_LINE_SIZE -
+ (pkt_inline_sz - 2) -
+ !!tso * sizeof(inl);
uintptr_t addr_end = (addr + inline_room) &
~(RTE_CACHE_LINE_SIZE - 1);
unsigned int copy_b = (addr_end > addr) ?
RTE_MIN((addr_end - addr), length) :
0;
uintptr_t addr_end = (addr + inline_room) &
~(RTE_CACHE_LINE_SIZE - 1);
unsigned int copy_b = (addr_end > addr) ?
RTE_MIN((addr_end - addr), length) :
0;
- raw += MLX5_WQE_DWORD_SIZE;
if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
/*
* One Dseg remains in the current WQE. To
if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
/*
* One Dseg remains in the current WQE. To
break;
max_wqe -= n;
if (tso) {
break;
max_wqe -= n;
if (tso) {
- uint32_t inl =
- htonl(copy_b | MLX5_INLINE_SEG);
-
- pkt_inline_sz =
- MLX5_WQE_DS(tso_header_sz) *
- MLX5_WQE_DWORD_SIZE;
+ inl = htonl(copy_b | MLX5_INLINE_SEG);
rte_memcpy((void *)raw,
(void *)&inl, sizeof(inl));
raw += sizeof(inl);
rte_memcpy((void *)raw,
(void *)&inl, sizeof(inl));
raw += sizeof(inl);