* Copyright(c) 2018-2019 Hisilicon Limited.
*/
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <inttypes.h>
#include <rte_bus_pci.h>
-#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
-#include <rte_dev.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
#include <rte_vxlan.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
-#include <rte_ip.h>
-#include <rte_gre.h>
#include <rte_net.h>
#include <rte_malloc.h>
-#include <rte_pci.h>
#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
#include <rte_cpuflags.h>
#endif
for (i = 0; i < rxq->nb_rx_desc; i++) {
mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(mbuf == NULL)) {
- hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
+ hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
i);
hns3_rx_queue_release_mbufs(rxq);
return -ENOMEM;
}
}
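+/*
+ * Disable the Rx or Tx ring of a TQP by clearing its enable bit; used for
+ * rings whose Rx or Tx queue struct has not been initialized.
+ */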
+static void
+hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
+{
+ uint32_t reg_offset;
+ uint32_t reg;
+
+ reg_offset = queue_type == HNS3_RING_TYPE_TX ?
+ HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
+ reg = hns3_read_reg(tqp_base, reg_offset);
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_reg(tqp_base, reg_offset, reg);
+}
+
void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
if (hns3_dev_indep_txrx_supported(hw)) {
rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+
+ tqp_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(i));
/*
- * After initialization, rxq and txq won't be NULL at
- * the same time.
+			 * If the queue struct is not initialized, the related
+			 * HW ring has not been initialized yet. Such queues
+			 * should be disabled before the tqps are enabled, to
+			 * avoid a HW exception, since the queues are enabled
+			 * by default.
*/
- if (rxq != NULL)
- tqp_base = rxq->io_base;
- else if (txq != NULL)
- tqp_base = txq->io_base;
- else
- return;
+ if (rxq == NULL)
+ hns3_stop_unused_queue(tqp_base,
+ HNS3_RING_TYPE_RX);
+ if (txq == NULL)
+ hns3_stop_unused_queue(tqp_base,
+ HNS3_RING_TYPE_TX);
} else {
rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
hw->fkq_data.rx_queues[i - nb_rx_q];
return -EINVAL;
}
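+/*
+ * Re-apply the enable state recorded in each Rx/Tx queue struct to the
+ * corresponding hardware ring.
+ */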
+void
+hns3_restore_tqp_enable_state(struct hns3_hw *hw)
+{
+ struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (rxq != NULL)
+ hns3_enable_rxq(rxq, rxq->enabled);
+ }
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (txq != NULL)
+ hns3_enable_txq(txq, txq->enabled);
+ }
+}
+
void
hns3_stop_all_txqs(struct rte_eth_dev *dev)
{
{
uint32_t addr;
- if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+ /*
+	 * int_ql_max == 0 means the hardware does not support QL.
+	 * Configuring the QL registers is not permitted when QL is
+	 * unsupported, so just return.
+ */
+ if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
return;
addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (rxq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
q_info->idx);
return NULL;
}
rx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (rx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
q_info->idx);
hns3_rx_queue_release(rxq);
return NULL;
rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
rxq->rx_ring_phys_addr = rx_mz->iova;
- hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
rxq->rx_ring_phys_addr);
return rxq;
q_info.ring_name = "rx_fake_ring";
rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
if (rxq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
return -ENOMEM;
}
txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (txq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
q_info->idx);
return NULL;
}
tx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (tx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
q_info->idx);
hns3_tx_queue_release(txq);
return NULL;
txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
txq->tx_ring_phys_addr = tx_mz->iova;
- hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
txq->tx_ring_phys_addr);
/* Clear tx bd */
q_info.ring_name = "tx_fake_ring";
txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
if (txq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
return -ENOMEM;
}
vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
-
if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
return -EINVAL;
if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
- hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
- "(%d) of tx descriptors for port=%d queue=%d check "
+ hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
+ "(%u) of tx descriptors for port=%u queue=%u check "
"fail!",
rs_thresh, free_thresh, nb_desc, hw->data->port_id,
idx);
txq->tx_bd_ready = tx_bd_ready;
}
-static int
-hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
- struct rte_mbuf *rxm, uint8_t *l2_len)
-{
- uint64_t tun_flags;
- uint8_t ol4_len;
- uint32_t otmp;
-
- tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
- if (tun_flags == 0)
- return 0;
-
- otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
- switch (tun_flags) {
- case PKT_TX_TUNNEL_GENEVE:
- case PKT_TX_TUNNEL_VXLAN:
- *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
- break;
- case PKT_TX_TUNNEL_GRE:
- /*
- * OL4 header size, defined in 4 Bytes, it contains outer
- * L4(GRE) length and tunneling length.
- */
- ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S);
- *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
- break;
- default:
- /* For non UDP / GRE tunneling, drop the tunnel packet */
- return -EINVAL;
- }
- hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
- desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
-
- return 0;
-}
-
int
hns3_config_gro(struct hns3_hw *hw, bool en)
{
}
static void
-hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
- uint32_t paylen, struct rte_mbuf *rxm)
+hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
{
- uint8_t l2_len = rxm->l2_len;
- uint32_t tmp;
-
if (!hns3_pkt_is_tso(rxm))
return;
- if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
- return;
-
if (paylen <= rxm->tso_segsz)
return;
- tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
- hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
- hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
- hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- l2_len >> HNS3_L2_LEN_UNIT);
- desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
+ desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
}
rxm->outer_l2_len + rxm->outer_l3_len : 0;
paylen = rxm->pkt_len - hdr_len;
desc->tx.paylen = rte_cpu_to_le_32(paylen);
- hns3_set_tso(desc, ol_flags, paylen, rxm);
+ hns3_set_tso(desc, paylen, rxm);
/*
* Currently, hardware doesn't support more than two layers VLAN offload
}
static void
-hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
+hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
{
uint32_t tmp = *ol_type_vlan_len_msec;
+ uint64_t ol_flags = m->ol_flags;
/* (outer) IP header type */
if (ol_flags & PKT_TX_OUTER_IPV4) {
- /* OL3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- hns3_set_field(tmp, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
+ tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
else
- hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
+ tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
} else if (ol_flags & PKT_TX_OUTER_IPV6) {
- hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV6);
- /* OL3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
+ tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
}
-
+ /* OL3 header size, defined in 4 bytes */
+ tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+ m->outer_l3_len >> HNS3_L3_LEN_UNIT);
*ol_type_vlan_len_msec = tmp;
}
static int
-hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
- struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
+ uint32_t *type_cs_vlan_tso_len)
{
- uint32_t tmp = *ol_type_vlan_len_msec;
- uint8_t l4_len;
-
- /* OL2 header size, defined in 2 bytes */
- hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
+#define HNS3_NVGRE_HLEN 8
+ uint32_t tmp_outer = *ol_type_vlan_len_msec;
+ uint32_t tmp_inner = *type_cs_vlan_tso_len;
+ uint64_t ol_flags = m->ol_flags;
+ uint16_t inner_l2_len;
- /* L4TUNT: L4 Tunneling Type */
switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_VXLAN_GPE:
case PKT_TX_TUNNEL_GENEVE:
case PKT_TX_TUNNEL_VXLAN:
- /* MAC in UDP tunnelling packet, include VxLAN */
- hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+		/* MAC in UDP tunnelling packet, including VxLAN and GENEVE */
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
/*
- * OL4 header size, defined in 4 Bytes, it contains outer
- * L4(UDP) length and tunneling length.
+		 * For a tunnel packet, the mbuf's inner l2_len is the sum of
+		 * the outer L4 length, the tunneling header length and the
+		 * inner L2 length. But in the hns3 tx descriptor, the
+		 * tunneling header length is carried in the outer L4 length
+		 * field. Therefore, the driver needs to calculate the outer
+		 * L4 length and the inner L2 length.
*/
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- (uint8_t)RTE_ETHER_VXLAN_HLEN >>
- HNS3_L4_LEN_UNIT);
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (uint8_t)RTE_ETHER_VXLAN_HLEN >>
+ HNS3_L4_LEN_UNIT);
+
+ inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
break;
case PKT_TX_TUNNEL_GRE:
- hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
/*
- * OL4 header size, defined in 4 Bytes, it contains outer
- * L4(GRE) length and tunneling length.
+		 * For an NVGRE tunnel packet, the outer L4 is empty, so only
+		 * the NVGRE header length is filled into the outer L4 field.
*/
- l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- l4_len >> HNS3_L4_LEN_UNIT);
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
+
+ inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
break;
default:
/* For non UDP / GRE tunneling, drop the tunnel packet */
return -EINVAL;
}
- *ol_type_vlan_len_msec = tmp;
+ tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+ inner_l2_len >> HNS3_L2_LEN_UNIT);
+ /* OL2 header size, defined in 2 bytes */
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+ m->outer_l2_len >> HNS3_L2_LEN_UNIT);
+
+ *type_cs_vlan_tso_len = tmp_inner;
+ *ol_type_vlan_len_msec = tmp_outer;
return 0;
}
static int
-hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
- uint64_t ol_flags,
- struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+ uint16_t tx_desc_id)
{
struct hns3_desc *tx_ring = txq->tx_ring;
struct hns3_desc *desc = &tx_ring[tx_desc_id];
- uint32_t value = 0;
+ uint32_t tmp_outer = 0;
+ uint32_t tmp_inner = 0;
int ret;
- hns3_parse_outer_params(ol_flags, &value);
- ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
- if (ret)
- return -EINVAL;
+ /*
+	 * The tunnel header is counted in the inner L2 header length of the
+	 * mbuf, but in the hns3 descriptor it belongs to the outer L4 length,
+	 * so the driver has to convert between the two. To avoid repeating
+	 * the calculation, both the outer and inner L2 header lengths are
+	 * filled in while parsing tunnel packets.
+ */
+ if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) {
+ /*
+		 * For non-tunnel packets the tunnel type id is 0, so there is
+		 * no need to assign it. Only the inner (normal) L2 header
+		 * length is assigned.
+ */
+ tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
+ } else {
+ /*
+		 * If the outer csum is not offloaded, the outer lengths may
+		 * be left as 0, and the outer header length folded into the
+		 * inner l2_len, which would lead to a cksum error. So the
+		 * driver has to calculate the header lengths itself.
+ */
+ if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+ m->outer_l2_len == 0)) {
+ struct rte_net_hdr_lens hdr_len;
+ (void)rte_net_get_ptype(m, &hdr_len,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+ m->outer_l3_len = hdr_len.l3_len;
+ m->outer_l2_len = hdr_len.l2_len;
+ m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
+ }
+ hns3_parse_outer_params(m, &tmp_outer);
+ ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
+ if (ret)
+ return -EINVAL;
+ }
- desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
+ desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
+ desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
return 0;
}
static void
-hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
{
+ uint64_t ol_flags = m->ol_flags;
+ uint32_t l3_type;
uint32_t tmp;
+ tmp = *type_cs_vlan_tso_len;
+ if (ol_flags & PKT_TX_IPV4)
+ l3_type = HNS3_L3T_IPV4;
+ else if (ol_flags & PKT_TX_IPV6)
+ l3_type = HNS3_L3T_IPV6;
+ else
+ l3_type = HNS3_L3T_NONE;
+
+ /* inner(/normal) L3 header size, defined in 4 bytes */
+ tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+ m->l3_len >> HNS3_L3_LEN_UNIT);
+
+ tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
+
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IPV4) {
- tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
- HNS3_L3T_IPV4);
- /* inner(/normal) L3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
- if (ol_flags & PKT_TX_IP_CKSUM)
- hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
- *type_cs_vlan_tso_len = tmp;
- } else if (ol_flags & PKT_TX_IPV6) {
- tmp = *type_cs_vlan_tso_len;
- /* L3T, IPv6 don't do checksum */
- hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
- HNS3_L3T_IPV6);
- /* inner(/normal) L3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
- }
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ tmp |= BIT(HNS3_TXD_L3CS_B);
+ *type_cs_vlan_tso_len = tmp;
}
static void
-hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
{
+ uint64_t ol_flags = m->ol_flags;
uint32_t tmp;
-
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
+ switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
case PKT_TX_TCP_CKSUM:
+ case PKT_TX_TCP_SEG:
tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
case PKT_TX_UDP_CKSUM:
tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
case PKT_TX_SCTP_CKSUM:
tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
- break;
+ return;
}
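+	/* L4 csum enable bit and L4 header length are common to TCP, UDP and SCTP */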
+ tmp |= BIT(HNS3_TXD_L4CS_B);
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ m->l4_len >> HNS3_L4_LEN_UNIT);
+ *type_cs_vlan_tso_len = tmp;
}
static void
-hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
- uint64_t ol_flags)
+hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+ uint16_t tx_desc_id)
{
struct hns3_desc *tx_ring = txq->tx_ring;
struct hns3_desc *desc = &tx_ring[tx_desc_id];
uint32_t value = 0;
- /* inner(/normal) L2 header size, defined in 2 bytes */
- hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
-
- hns3_parse_l3_cksum_params(ol_flags, &value);
- hns3_parse_l4_cksum_params(ol_flags, &value);
+ hns3_parse_l3_cksum_params(m, &value);
+ hns3_parse_l4_cksum_params(m, &value);
desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
uint64_t ol_flags = m->ol_flags;
- struct rte_ipv4_hdr *ipv4_hdr;
- struct rte_udp_hdr *udp_hdr;
- uint32_t paylen, hdr_len;
+ uint32_t paylen, hdr_len, l4_proto;
if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
return;
- if (ol_flags & PKT_TX_IPV4) {
+ if (ol_flags & PKT_TX_OUTER_IPV4) {
+ struct rte_ipv4_hdr *ipv4_hdr;
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
m->outer_l2_len);
-
- if (ol_flags & PKT_TX_IP_CKSUM)
+ l4_proto = ipv4_hdr->next_proto_id;
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
+ } else {
+ struct rte_ipv6_hdr *ipv6_hdr;
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ m->outer_l2_len);
+ l4_proto = ipv6_hdr->proto;
}
-
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
- ol_flags & PKT_TX_TCP_SEG) {
+	/* The driver must ensure the outer UDP cksum is 0 for tunnel TSO. */
+ if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
+ struct rte_udp_hdr *udp_hdr;
hdr_len = m->l2_len + m->l3_len + m->l4_len;
- hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
- m->outer_l2_len + m->outer_l3_len : 0;
+ hdr_len += m->outer_l2_len + m->outer_l3_len;
paylen = m->pkt_len - hdr_len;
if (paylen <= m->tso_segsz)
return;
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
- const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
+ struct rte_mbuf *m)
{
- /* Fill in tunneling parameters if necessary */
- if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
- (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
- if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
- hdr_lens)) {
+ struct hns3_desc *tx_ring = txq->tx_ring;
+ struct hns3_desc *desc = &tx_ring[tx_desc_id];
+
+ /* Enable checksum offloading */
+ if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
+ /* Fill in tunneling parameters if necessary */
+ if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
txq->unsupported_tunnel_pkt_cnt++;
- return -EINVAL;
+ return -EINVAL;
}
+
+ hns3_txd_enable_checksum(txq, m, tx_desc_id);
+ } else {
+		/* clear the offload control fields of the descriptor */
+ desc->tx.type_cs_vlan_tso_len = 0;
+ desc->tx.ol_type_vlan_len_msec = 0;
}
- /* Enable checksum offloading */
- if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
- hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
return 0;
}
uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct rte_net_hdr_lens hdr_lens = {0};
struct hns3_tx_queue *txq = tx_queue;
struct hns3_entry *tx_bak_pkt;
struct hns3_desc *tx_ring;
if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
goto end_of_tx;
- if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
+ if (hns3_parse_cksum(txq, tx_next_use, m_seg))
goto end_of_tx;
i = 0;
return ret;
}
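+/*
+ * Reset the software state of an Rx queue (ring indices, pending segment
+ * pointers and the descriptor ring memory) and redo the per-queue setup so
+ * the queue can be restarted from a clean state.
+ */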
+static void
+hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
+{
+ rxq->next_to_use = 0;
+ rxq->rx_rearm_start = 0;
+ rxq->rx_free_hold = 0;
+ rxq->rx_rearm_nb = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+ memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
+ hns3_rxq_vec_setup(rxq);
+}
+
int
hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
return -ENOTSUP;
hns3_enable_rxq(rxq, false);
+
hns3_rx_queue_release_mbufs(rxq);
+
+ hns3_reset_sw_rxq(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;