#include "hns3_logs.h"
#define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
-#define DEFAULT_RX_FREE_THRESH 16
+#define DEFAULT_RX_FREE_THRESH 32
static void
hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
}
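+/*
+ * Program the per-queue interrupt coalescing GL value. The gl_value is
+ * given in microseconds and converted to register units; an out-of-range
+ * gl_idx or gl_value is silently ignored.
+ */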
void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+ uint8_t gl_idx, uint16_t gl_value)
{
+ uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+ HNS3_TQP_INTR_GL1_REG,
+ HNS3_TQP_INTR_GL2_REG};
uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+ if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+ return;
+
+ addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ value = HNS3_GL_USEC_TO_REG(gl_value);
+
+ hns3_write_dev(hw, addr, value);
+}
+
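+/*
+ * Program the per-queue interrupt rate limit (RL) value, given in
+ * microseconds. A non-zero value also sets the RL enable bit; values
+ * above HNS3_TQP_INTR_RL_MAX are silently ignored.
+ */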
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+ uint32_t addr, value;
+
+ if (rl_value > HNS3_TQP_INTR_RL_MAX)
+ return;
+
+ addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ value = HNS3_RL_USEC_TO_REG(rl_value);
+ if (value > 0)
+ value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+ hns3_write_dev(hw, addr, value);
+}
+
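+/* Enable or disable the TQP interrupt for the given queue. */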
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
+{
+ uint32_t addr, value;
+
+ addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
value = en ? 1 : 0;
hns3_write_dev(hw, addr, value);
if (dev->data->dev_conf.intr_conf.rxq == 0)
return -ENOTSUP;
- /* enable the vectors */
- hns3_tqp_intr_enable(hw, queue_id, true);
+ hns3_queue_intr_enable(hw, queue_id, true);
return rte_intr_ack(intr_handle);
}
if (dev->data->dev_conf.intr_conf.rxq == 0)
return -ENOTSUP;
- /* disable the vectors */
- hns3_tqp_intr_enable(hw, queue_id, false);
+ hns3_queue_intr_enable(hw, queue_id, false);
return 0;
}
rxq->next_to_use = 0;
rxq->next_to_clean = 0;
+ rxq->nb_rx_hold = 0;
hns3_init_rx_queue_hw(rxq);
return 0;
rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
rxq->next_to_use = 0;
rxq->next_to_clean = 0;
+ rxq->nb_rx_hold = 0;
hns3_init_rx_queue_hw(rxq);
}
if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
/* first time configuration */
-
uint32_t size;
size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
}
} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
/* re-configure */
-
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
hns3_dev_rx_queue_release(rxq[i]);
if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
/* first time configuration */
-
uint32_t size;
size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
}
} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
/* re-configure */
-
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
hns3_dev_tx_queue_release(txq[i]);
rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
rxq->rx_buf_len = hw->rx_buf_len;
- rxq->non_vld_descs = 0;
rxq->l2_errors = 0;
rxq->pkt_len_errors = 0;
rxq->l3_csum_erros = 0;
uint16_t
hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
+ volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
+ volatile struct hns3_desc *rxdp; /* pointer of the current desc */
struct hns3_rx_queue *rxq; /* RX queue */
- struct hns3_desc *rx_ring; /* RX ring (desc) */
struct hns3_entry *sw_ring;
struct hns3_entry *rxe;
- struct hns3_desc *rxdp; /* pointer of the current desc */
struct rte_mbuf *first_seg;
struct rte_mbuf *last_seg;
+ struct hns3_desc rxd;
struct rte_mbuf *nmb; /* pointer of the new mbuf */
struct rte_mbuf *rxm;
struct rte_eth_dev *dev;
uint16_t pkt_len;
uint16_t nb_rx;
uint16_t rx_id;
- int num; /* num of desc in ring */
int ret;
nb_rx = 0;
last_seg = rxq->pkt_last_seg;
sw_ring = rxq->sw_ring;
- /* Get num of packets in descriptor ring */
- num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
- while (nb_rx_bd < num && nb_rx < nb_pkts) {
+ while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
- if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
- rxq->non_vld_descs++;
+ if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
break;
- }
+ /*
+ * The interaction between software and hardware when receiving a
+ * new packet in the hns3 network engine:
+ * 1. The hardware network engine first writes the packet content
+ * to the memory pointed to by the 'addr' field of the Rx Buffer
+ * Descriptor, then fills the result of parsing the packet,
+ * including the valid field, into the Rx Buffer Descriptor in one
+ * write operation.
+ * 2. The driver reads the Rx BD's valid field in a loop to check
+ * whether it is valid; if so, it assigns a new address to the addr
+ * field, clears the valid field, gets the other information of the
+ * packet by parsing the Rx BD's other fields, and finally writes
+ * back the number of Rx BDs processed by the driver to the
+ * HNS3_RING_RX_HEAD_REG register to inform hardware.
+ * In the above process, the ordering is very important. We must
+ * make sure that the CPU reads the Rx BD's other fields only after
+ * the Rx BD is valid.
+ *
+ * There are two types of re-ordering: compiler re-ordering and
+ * CPU re-ordering under the ARMv8 architecture.
+ * 1. We use volatile to deal with compiler re-ordering, which is
+ * why rx_ring/rxdp are defined with volatile.
+ * 2. We commonly use a memory barrier to deal with CPU
+ * re-ordering, but its cost is high.
+ *
+ * To avoid the high cost of a memory barrier, we instead rely on
+ * the data dependency ordering guaranteed by the ARMv8
+ * architecture, for example:
+ * instr01: load A
+ * instr02: load B <- A
+ * the instr02 will always execute after instr01.
+ *
+ * To construct the data dependency ordering, we use the
+ * following assignment:
+ * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
+ * (1u<<HNS3_RXD_VLD_B)]
+ * With the gcc compiler on the ARMv8 architecture, the related
+ * assembly code looks as follows:
+ * note: (1u << HNS3_RXD_VLD_B) equals 0x10
+ * instr01: ldr w26, [x22, #28] --read bd_base_info
+ * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
+ * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
+ * 0x10) - 0x10
+ * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
+ * instr05: ldp x2, x3, [x0]
+ * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
+ * instr07: ldp x4, x5, [x0, #16]
+ * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
+ * instr05~08 depend on x0's value, x0 depends on w26's value,
+ * and w26 holds bd_base_info; this forms the data dependency
+ * ordering.
+ * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
+ * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
+ * assignment is correct.
+ *
+ * So we use the data dependency ordering instead of a memory
+ * barrier to improve receive performance.
+ */
+ rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
+ (1u << HNS3_RXD_VLD_B)];
nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(nmb == NULL)) {
nb_rx_bd++;
rxe = &sw_ring[rx_id];
rx_id++;
- if (rx_id == rxq->nb_rx_desc)
+ if (unlikely(rx_id == rxq->nb_rx_desc))
rx_id = 0;
rte_prefetch0(sw_ring[rx_id].mbuf);
rxe->mbuf = nmb;
dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
- rxdp->addr = dma_addr;
rxdp->rx.bd_base_info = 0;
+ rxdp->addr = dma_addr;
- rte_cio_rmb();
/* Load remained descriptor data and extract necessary fields */
- data_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.size));
- l234_info = rte_le_to_cpu_32(rxdp->rx.l234_info);
- ol_info = rte_le_to_cpu_32(rxdp->rx.ol_info);
+ data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size));
+ l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
+ ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
if (first_seg == NULL) {
first_seg = rxm;
}
/* The last buffer of the received packet */
- pkt_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.pkt_len));
+ pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len));
first_seg->pkt_len = pkt_len;
first_seg->port = rxq->port_id;
- first_seg->hash.rss = rte_le_to_cpu_32(rxdp->rx.rss_hash);
+ first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
first_seg->ol_flags |= PKT_RX_RSS_HASH;
if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
first_seg->hash.fdir.hi =
- rte_le_to_cpu_32(rxdp->rx.fd_id);
+ rte_le_to_cpu_32(rxd.rx.fd_id);
first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
rxm->next = NULL;
hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
cksum_err);
- first_seg->vlan_tci = rte_le_to_cpu_16(rxdp->rx.vlan_tag);
+ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag);
first_seg->vlan_tci_outer =
- rte_le_to_cpu_16(rxdp->rx.ot_vlan_tag);
+ rte_le_to_cpu_16(rxd.rx.ot_vlan_tag);
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
continue;
rxq->next_to_clean = rx_id;
rxq->pkt_first_seg = first_seg;
rxq->pkt_last_seg = last_seg;
- hns3_clean_rx_buffers(rxq, nb_rx_bd);
+
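+ /*
+ * Accumulate the number of processed BDs and only write them back
+ * to hardware once the count exceeds rx_free_thresh, reducing the
+ * number of doorbell writes per receive burst.
+ */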
+ nb_rx_bd = nb_rx_bd + rxq->nb_rx_hold;
+ if (nb_rx_bd > rxq->rx_free_thresh) {
+ hns3_clean_rx_buffers(rxq, nb_rx_bd);
+ nb_rx_bd = 0;
+ }
+ rxq->nb_rx_hold = nb_rx_bd;
return nb_rx;
}
txq->tx_bd_ready = tx_bd_ready;
}
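+/*
+ * For a TSO packet with a tunnel offload requested, derive the inner L2
+ * length and fill the outer L2 length into the Tx descriptor. Tunnel
+ * types other than VXLAN/GENEVE and GRE are rejected with -EINVAL.
+ */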
+static int
+hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
+ struct rte_mbuf *rxm, uint8_t *l2_len)
+{
+ uint64_t tun_flags;
+ uint8_t ol4_len;
+ uint32_t otmp;
+
+ tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
+ if (tun_flags == 0)
+ return 0;
+
+ otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
+ switch (tun_flags) {
+ case PKT_TX_TUNNEL_GENEVE:
+ case PKT_TX_TUNNEL_VXLAN:
+ *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ /*
+ * The OL4 header size, in units of 4 bytes; it contains the
+ * outer L4 (GRE) length and the tunneling length.
+ */
+ ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S);
+ *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
+ break;
+ default:
+ /* For tunnel types other than UDP and GRE, drop the tunnel packet */
+ return -EINVAL;
+ }
+ hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+ rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
+ desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
+
+ return 0;
+}
+
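+/*
+ * Fill the TSO related descriptor fields (TSO enable, L3/L4 checksum
+ * enable, L4 and L2 header lengths, and MSS) when PKT_TX_TCP_SEG is set
+ * and the payload is large enough to actually be segmented.
+ */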
+static void
+hns3_set_tso(struct hns3_desc *desc,
+ uint64_t ol_flags, struct rte_mbuf *rxm)
+{
+ uint32_t paylen, hdr_len;
+ uint32_t tmp;
+ uint8_t l2_len = rxm->l2_len;
+
+ if (!(ol_flags & PKT_TX_TCP_SEG))
+ return;
+
+ if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
+ return;
+
+ hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
+ hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
+ rxm->outer_l2_len + rxm->outer_l3_len : 0;
+ paylen = rxm->pkt_len - hdr_len;
+ if (paylen <= rxm->tso_segsz)
+ return;
+
+ tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
+ hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
+ hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
+ hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
+ hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
+ hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+ l2_len >> HNS3_L2_LEN_UNIT);
+ desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
+ desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
+}
+
static void
fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
bool first, int offset)
struct hns3_desc *tx_ring = txq->tx_ring;
struct hns3_desc *desc = &tx_ring[tx_desc_id];
uint8_t frag_end = rxm->next == NULL ? 1 : 0;
+ uint64_t ol_flags = rxm->ol_flags;
uint16_t size = rxm->data_len;
uint16_t rrcfv = 0;
- uint64_t ol_flags = rxm->ol_flags;
uint32_t hdr_len;
uint32_t paylen;
uint32_t tmp;
rxm->outer_l2_len + rxm->outer_l3_len : 0;
paylen = rxm->pkt_len - hdr_len;
desc->tx.paylen = rte_cpu_to_le_32(paylen);
+ hns3_set_tso(desc, ol_flags, rxm);
}
hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}
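+/*
+ * Check whether a TSO packet violates the hardware constraint on the
+ * data length of consecutive buffers (see the comment below) and thus
+ * must be linearized before it can be transmitted.
+ */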
+static bool
+hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
+{
+ struct rte_mbuf *m_first = tx_pkts;
+ struct rte_mbuf *m_last = tx_pkts;
+ uint32_t tot_len = 0;
+ uint32_t hdr_len;
+ uint32_t i;
+
+ /*
+ * In the hns3 network engine, hardware requires that the sum of
+ * the data lengths of every 8 consecutive buffers is greater than
+ * MSS. We simplify this by ensuring that the sum of the first 8
+ * consecutive frags is greater than the GSO header length plus
+ * MSS, and that each subsequent window of 7 consecutive frags is
+ * greater than MSS, except for the last 7 frags.
+ */
+ if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
+ return false;
+
+ for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
+ i++, m_last = m_last->next)
+ tot_len += m_last->data_len;
+
+ if (!m_last)
+ return true;
+
+ /* ensure the sum of the first 8 frags is greater than mss + header */
+ hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
+ hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
+ if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
+ return true;
+
+ /*
+ * ensure the sum of the data lengths of every 7 consecutive
+ * buffers is greater than mss, except for the last window.
+ */
+ for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
+ tot_len -= m_first->data_len;
+ tot_len += m_last->data_len;
+
+ if (tot_len < tx_pkts->tso_segsz)
+ return true;
+
+ m_first = m_first->next;
+ m_last = m_last->next;
+ }
+
+ return false;
+}
+
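+/*
+ * When outer IP offload flags are set, zero the outer IPv4 header
+ * checksum (if IP checksum offload is requested) and, for TSO packets
+ * with UDP checksum offload whose payload exceeds the MSS, the outer
+ * UDP header checksum in the packet data.
+ */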
+static void
+hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
+{
+ uint64_t ol_flags = m->ol_flags;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_udp_hdr *udp_hdr;
+ uint32_t paylen, hdr_len;
+
+ if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+ return;
+
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->outer_l2_len);
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+ }
+
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
+ ol_flags & PKT_TX_TCP_SEG) {
+ hdr_len = m->l2_len + m->l3_len + m->l4_len;
+ hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
+ m->outer_l2_len + m->outer_l3_len : 0;
+ paylen = m->pkt_len - hdr_len;
+ if (paylen <= m->tso_segsz)
+ return;
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->outer_l2_len +
+ m->outer_l3_len);
+ udp_hdr->dgram_cksum = 0;
+ }
+}
+
+static inline bool
+hns3_pkt_is_tso(struct rte_mbuf *m)
+{
+ return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
+}
+
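+/*
+ * Validate a TSO packet against hardware limits: the maximum number of
+ * BDs per packet, the maximum TSO header size, the maximum payload
+ * length, and the requirement that the whole header is contained in
+ * the first HNS3_MAX_TSO_HDR_BD_NUM buffers.
+ */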
+static int
+hns3_check_tso_pkt_valid(struct rte_mbuf *m)
+{
+ uint32_t tmp_data_len_sum = 0;
+ uint16_t nb_buf = m->nb_segs;
+ uint32_t paylen, hdr_len;
+ struct rte_mbuf *m_seg;
+ int i;
+
+ if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
+ return -EINVAL;
+
+ hdr_len = m->l2_len + m->l3_len + m->l4_len;
+ hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ m->outer_l2_len + m->outer_l3_len : 0;
+ if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
+ return -EINVAL;
+
+ paylen = m->pkt_len - hdr_len;
+ if (paylen > HNS3_MAX_BD_PAYLEN)
+ return -EINVAL;
+
+ /*
+ * The TSO header (including the outer and inner L2, L3 and L4
+ * headers) must be provided by at most three descriptors in the
+ * hns3 network engine.
+ */
+ m_seg = m;
+ for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
+ i++, m_seg = m_seg->next) {
+ tmp_data_len_sum += m_seg->data_len;
+ }
+
+ if (hdr_len > tmp_data_len_sum)
+ return -EINVAL;
+
+ return 0;
+}
+
uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
return i;
}
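+ /*
+ * Reject TSO packets that exceed the hardware TSO limits or that
+ * would require linearization.
+ */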
+ if (hns3_pkt_is_tso(m) &&
+ (hns3_pkt_need_linearized(m, m->nb_segs) ||
+ hns3_check_tso_pkt_valid(m))) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
return i;
}
+
+ hns3_outer_header_cksum_prepare(m);
}
return i;
return 0;
}
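+/*
+ * For non-TSO packets, reject frames longer than HNS3_MAX_FRAME_LEN and
+ * reassemble packets that need more BDs than the hardware supports for
+ * a single non-TSO packet.
+ */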
+static int
+hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
+ struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
+{
+ struct rte_mbuf *new_pkt;
+ int ret;
+
+ if (hns3_pkt_is_tso(*m_seg))
+ return 0;
+
+ /*
+ * If the packet length exceeds HNS3_MAX_FRAME_LEN, the maximum
+ * frame length supported by the driver, the packet will be ignored.
+ */
+ if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
+ return -EINVAL;
+
+ if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+ ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
+ if (ret)
+ return ret;
+ *m_seg = new_pkt;
+ }
+
+ return 0;
+}
+
uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct rte_net_hdr_lens hdr_lens = {0};
struct hns3_tx_queue *txq = tx_queue;
struct hns3_entry *tx_bak_pkt;
- struct rte_mbuf *new_pkt;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint32_t nb_hold = 0;
goto end_of_tx;
}
- /*
- * If packet length is greater than HNS3_MAX_FRAME_LEN
- * driver support, the packet will be ignored.
- */
- if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
- break;
-
/*
* If packet length is less than minimum packet size, driver
* need to pad it.
}
m_seg = tx_pkt;
- if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
- if (hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt))
- goto end_of_tx;
- m_seg = new_pkt;
- nb_buf = m_seg->nb_segs;
- }
+
+ if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
+ goto end_of_tx;
if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
goto end_of_tx;