* Copyright(c) 2018-2019 Hisilicon Limited.
*/
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <inttypes.h>
#include <rte_bus_pci.h>
-#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
-#include <rte_dev.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
#include <rte_vxlan.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_io.h>
-#include <rte_ip.h>
-#include <rte_gre.h>
#include <rte_net.h>
#include <rte_malloc.h>
-#include <rte_pci.h>
-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
#include <rte_cpuflags.h>
#endif
for (i = 0; i < rxq->nb_rx_desc; i++) {
mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(mbuf == NULL)) {
- hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
+ hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
i);
hns3_rx_queue_release_mbufs(rxq);
return -ENOMEM;
}
}
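+/*
+ * Rings are enabled by default in hardware, so a ring whose queue struct is
+ * not configured must be stopped explicitly by clearing its enable bit.
+ */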
+static void
+hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
+{
+ uint32_t reg_offset;
+ uint32_t reg;
+
+ reg_offset = queue_type == HNS3_RING_TYPE_TX ?
+ HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
+ reg = hns3_read_reg(tqp_base, reg_offset);
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_reg(tqp_base, reg_offset, reg);
+}
+
void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
if (hns3_dev_indep_txrx_supported(hw)) {
rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+
+ tqp_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(i));
/*
- * After initialization, rxq and txq won't be NULL at
- * the same time.
+		 * If the queue struct is not initialized, the related HW
+		 * ring has not been initialized yet either. Such queues must
+		 * be disabled before enabling the TQPs to avoid a HW
+		 * exception, since the queues are enabled by default.
*/
- if (rxq != NULL)
- tqp_base = rxq->io_base;
- else if (txq != NULL)
- tqp_base = txq->io_base;
- else
- return;
+ if (rxq == NULL)
+ hns3_stop_unused_queue(tqp_base,
+ HNS3_RING_TYPE_RX);
+ if (txq == NULL)
+ hns3_stop_unused_queue(tqp_base,
+ HNS3_RING_TYPE_TX);
} else {
rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
hw->fkq_data.rx_queues[i - nb_rx_q];
return -EINVAL;
}
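+/*
+ * Re-apply the enable state recorded in each queue struct to the hardware,
+ * typically when recovering from a reset.
+ */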
+void
+hns3_restore_tqp_enable_state(struct hns3_hw *hw)
+{
+ struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (rxq != NULL)
+ hns3_enable_rxq(rxq, rxq->enabled);
+ }
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (txq != NULL)
+ hns3_enable_txq(txq, txq->enabled);
+ }
+}
+
void
hns3_stop_all_txqs(struct rte_eth_dev *dev)
{
return ret;
}
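+/*
+ * Return the register offset of the given TQP interrupt. The first
+ * HNS3_MIN_EXT_TQP_INTR_ID interrupts live in the base register region with
+ * a fixed per-interrupt stride; the remaining ones live in the extended
+ * region and are addressed by a high-order block offset plus a low-order
+ * offset within the block.
+ */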
+uint32_t
+hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
+{
+ uint32_t reg_offset;
+
+	/* An extended offset is needed to configure queues beyond the first 64 */
+ if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
+ reg_offset = HNS3_TQP_INTR_REG_BASE +
+ tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+ else
+ reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
+ tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
+ tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+
+ return reg_offset;
+}
void
hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
return;
- addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
else
if (rl_value > HNS3_TQP_INTR_RL_MAX)
return;
- addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = HNS3_RL_USEC_TO_REG(rl_value);
if (value > 0)
value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
{
uint32_t addr;
- if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+	/*
+	 * int_ql_max == 0 means the hardware does not support QL.
+	 * Writing the QL registers is not permitted when QL is unsupported,
+	 * so just return.
+	 */
+ if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
return;
- addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
- addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
}
{
uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = en ? 1 : 0;
hns3_write_dev(hw, addr, value);
rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (rxq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
q_info->idx);
return NULL;
}
rx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (rx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
q_info->idx);
hns3_rx_queue_release(rxq);
return NULL;
rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
rxq->rx_ring_phys_addr = rx_mz->iova;
- hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
rxq->rx_ring_phys_addr);
return rxq;
q_info.ring_name = "rx_fake_ring";
rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
if (rxq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
return -ENOMEM;
}
txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (txq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
q_info->idx);
return NULL;
}
tx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (tx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
q_info->idx);
hns3_tx_queue_release(txq);
return NULL;
txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
txq->tx_ring_phys_addr = tx_mz->iova;
- hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
txq->tx_ring_phys_addr);
/* Clear tx bd */
q_info.ring_name = "tx_fake_ring";
txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
if (txq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
return -ENOMEM;
}
vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
-
if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
return -EINVAL;
HNS3_PORT_BASE_VLAN_ENABLE;
else
rxq->pvid_sw_discard_en = false;
+ rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
rxq->configured = true;
rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
HNS3_RING_RX_HEAD_REG);
rxq->rx_buf_len = rx_buf_size;
- rxq->l2_errors = 0;
- rxq->pkt_len_errors = 0;
- rxq->l3_csum_errors = 0;
- rxq->l4_csum_errors = 0;
- rxq->ol3_csum_errors = 0;
- rxq->ol4_csum_errors = 0;
+ memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
+ memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
+ memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
/* CRC len set here is used for amending packet length */
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
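+/*
+ * Fill the ptype table used when the RXD advanced layout is enabled; each
+ * index corresponds to the packet type value reported by hardware in the
+ * Rx descriptor.
+ */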
+static void
+hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
+{
+ uint32_t *ptype = tbl->ptype;
+
+ /* Non-tunnel L2 */
+ ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
+ ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
+ ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
+
+ /* Non-tunnel IPv4 */
+ ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* The next ptype is GRE over IPv4 */
+ ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP;
+ ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_IGMP;
+ ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP;
+ /* The next ptype is PTP over IPv4 + UDP */
+ ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT;
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* Non-tunnel IPv6 */
+ ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* The next ptype is GRE over IPv6 */
+ ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP;
+ ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_IGMP;
+ ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP;
+ /* Special for PTP over IPv6 + UDP */
+ ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT;
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+}
+
void
hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
{
hns3_init_non_tunnel_ptype_tbl(tbl);
hns3_init_tunnel_ptype_tbl(tbl);
+ hns3_init_adv_layout_ptype(tbl);
}
static inline void
cksum_err);
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += rxm->pkt_len;
+
rx_pkts[nb_rx++] = rxm;
continue;
pkt_err:
cksum_err);
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += first_seg->pkt_len;
+
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
continue;
static bool
hns3_check_sve_support(void)
{
-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
return true;
#endif
{
struct hns3_adapter *hns = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
+ bool vec_allowed, sve_allowed, simple_allowed;
+
+ vec_allowed = hns->rx_vec_allowed &&
+ hns3_rx_check_vec_support(dev) == 0;
+ sve_allowed = vec_allowed && hns3_check_sve_support();
+ simple_allowed = hns->rx_simple_allowed && !dev->data->scattered_rx &&
+ (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
+ return hns3_recv_pkts_vec;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+ return hns3_recv_pkts_vec_sve;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
+ return hns3_recv_pkts;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
+ return hns3_recv_scattered_pkts;
- if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
- return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
- hns3_recv_pkts_vec;
-
- if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
- (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
+ if (vec_allowed)
+ return hns3_recv_pkts_vec;
+ if (simple_allowed)
return hns3_recv_pkts;
return hns3_recv_scattered_pkts;
if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
- hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
- "(%d) of tx descriptors for port=%d queue=%d check "
+ hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
+ "(%u) of tx descriptors for port=%u queue=%u check "
"fail!",
rs_thresh, free_thresh, nb_desc, hw->data->port_id,
idx);
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->tso_mode = hw->tso_mode;
- txq->over_length_pkt_cnt = 0;
- txq->exceed_limit_bd_pkt_cnt = 0;
- txq->exceed_limit_bd_reassem_fail = 0;
- txq->unsupported_tunnel_pkt_cnt = 0;
- txq->queue_full_cnt = 0;
- txq->pkt_padding_fail_cnt = 0;
+ memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
+ memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
rxm->outer_l2_len + rxm->outer_l3_len : 0;
paylen = rxm->pkt_len - hdr_len;
- desc->tx.paylen = rte_cpu_to_le_32(paylen);
+ desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
hns3_set_tso(desc, paylen, rxm);
/*
{
struct hns3_desc *tx_ring = txq->tx_ring;
struct hns3_desc *desc = &tx_ring[tx_desc_id];
+ uint64_t ol_flags = m->ol_flags;
uint32_t tmp_outer = 0;
uint32_t tmp_inner = 0;
+ uint32_t tmp_ol4cs;
int ret;
/*
 * calculations, the length of the L2 header, including both the outer and
 * inner, will be filled during the parsing of tunnel packets.
*/
- if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) {
+ if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
/*
* For non tunnel type the tunnel type id is 0, so no need to
* assign a value to it. Only the inner(normal) L2 header length
* inner l2_len. It would lead a cksum error. So driver has to
* calculate the header length.
*/
- if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+ if (unlikely(!(ol_flags &
+ (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
m->outer_l2_len == 0)) {
struct rte_net_hdr_lens hdr_len;
(void)rte_net_get_ptype(m, &hdr_len,
desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
+ tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
+ BIT(HNS3_TXD_OL4CS_B) : 0;
+ desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
return 0;
}
uint32_t tmp;
/* Enable L4 checksum offloads */
switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
+ case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG:
case PKT_TX_TCP_CKSUM:
case PKT_TX_TCP_SEG:
tmp = *type_cs_vlan_tso_len;
return false;
}
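+/*
+ * Prepare the outer IPv4 header for checksum offload. Return true when the
+ * caller needs no further outer L4 processing; otherwise store the outer L4
+ * protocol in *l4_proto and return false.
+ */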
+static bool
+hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ uint32_t *l4_proto)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->outer_l2_len);
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+ if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+ struct rte_udp_hdr *udp_hdr;
+ /*
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the
+		 * pseudo header for TSO packets.
+ */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ return true;
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->outer_l2_len + m->outer_l3_len);
+ udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+
+ return true;
+ }
+ *l4_proto = ipv4_hdr->next_proto_id;
+ return false;
+}
+
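+/* IPv6 counterpart of hns3_outer_ipv4_cksum_prepared(). */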
+static bool
+hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ uint32_t *l4_proto)
+{
+ struct rte_ipv6_hdr *ipv6_hdr;
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ m->outer_l2_len);
+ if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+ struct rte_udp_hdr *udp_hdr;
+ /*
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the
+		 * pseudo header for TSO packets.
+ */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ return true;
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->outer_l2_len + m->outer_l3_len);
+ udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+
+ return true;
+ }
+ *l4_proto = ipv6_hdr->proto;
+ return false;
+}
+
static void
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
uint64_t ol_flags = m->ol_flags;
uint32_t paylen, hdr_len, l4_proto;
+ struct rte_udp_hdr *udp_hdr;
if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
return;
if (ol_flags & PKT_TX_OUTER_IPV4) {
- struct rte_ipv4_hdr *ipv4_hdr;
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- m->outer_l2_len);
- l4_proto = ipv4_hdr->next_proto_id;
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
+ if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
+ return;
} else {
- struct rte_ipv6_hdr *ipv6_hdr;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
- m->outer_l2_len);
- l4_proto = ipv6_hdr->proto;
+ if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
+ return;
}
+
/* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
- struct rte_udp_hdr *udp_hdr;
hdr_len = m->l2_len + m->l3_len + m->l4_len;
hdr_len += m->outer_l2_len + m->outer_l3_len;
paylen = m->pkt_len - hdr_len;
if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
/* Fill in tunneling parameters if necessary */
if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
- txq->unsupported_tunnel_pkt_cnt++;
+ txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
return -EINVAL;
}
* driver support, the packet will be ignored.
*/
if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
- txq->over_length_pkt_cnt++;
+ txq->dfx_stats.over_length_pkt_cnt++;
return -EINVAL;
}
max_non_tso_bd_num = txq->max_non_tso_bd_num;
if (unlikely(nb_buf > max_non_tso_bd_num)) {
- txq->exceed_limit_bd_pkt_cnt++;
+ txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
max_non_tso_bd_num);
if (ret) {
- txq->exceed_limit_bd_reassem_fail++;
+ txq->dfx_stats.exceed_limit_bd_reassem_fail++;
return ret;
}
*m_seg = new_pkt;
dma_addr = rte_mbuf_data_iova(*pkts);
txdp->addr = rte_cpu_to_le_64(dma_addr);
txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
- txdp->tx.paylen = 0;
+ txdp->tx.paylen_fd_dop_ol4cs = 0;
txdp->tx.type_cs_vlan_tso_len = 0;
txdp->tx.ol_type_vlan_len_msec = 0;
txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
dma_addr = rte_mbuf_data_iova(*pkts);
txdp->addr = rte_cpu_to_le_64(dma_addr);
txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
- txdp->tx.paylen = 0;
+ txdp->tx.paylen_fd_dop_ol4cs = 0;
txdp->tx.type_cs_vlan_tso_len = 0;
txdp->tx.ol_type_vlan_len_msec = 0;
txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
hns3_tx_setup_4bd(txdp + i, pkts + i);
+
+ /* Increment bytes counter */
+ uint32_t j;
+ for (j = 0; j < PER_LOOP_NUM; j++)
+ txq->basic_stats.bytes += pkts[i + j]->pkt_len;
}
if (unlikely(leftover > 0)) {
for (i = 0; i < leftover; i++) {
pkts + mainpart + i);
hns3_tx_setup_1bd(txdp + mainpart + i,
pkts + mainpart + i);
+
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
}
}
}
nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
if (unlikely(nb_pkts == 0)) {
if (txq->tx_bd_ready == 0)
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
return 0;
}
nb_buf = tx_pkt->nb_segs;
if (nb_buf > txq->tx_bd_ready) {
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
if (nb_tx == 0)
return 0;
rte_pktmbuf_pkt_len(tx_pkt);
appended = rte_pktmbuf_append(tx_pkt, add_len);
if (appended == NULL) {
- txq->pkt_padding_fail_cnt++;
+ txq->dfx_stats.pkt_padding_fail_cnt++;
break;
}
desc->tx.tp_fe_sc_vld_ra_ri |=
rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += tx_pkt->pkt_len;
nb_hold += i;
txq->next_to_use = tx_next_use;
txq->tx_bd_ready -= i;
{
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_adapter *hns = dev->data->dev_private;
+ bool vec_allowed, sve_allowed, simple_allowed;
- if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
- *prep = NULL;
- return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
- hns3_xmit_pkts_vec;
- }
+ vec_allowed = hns->tx_vec_allowed &&
+ hns3_tx_check_vec_support(dev) == 0;
+ sve_allowed = vec_allowed && hns3_check_sve_support();
+ simple_allowed = hns->tx_simple_allowed &&
+ offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+
+ *prep = NULL;
- if (hns->tx_simple_allowed &&
- offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
- *prep = NULL;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
+ return hns3_xmit_pkts_vec;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+ return hns3_xmit_pkts_vec_sve;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
return hns3_xmit_pkts_simple;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
+ *prep = hns3_prep_pkts;
+ return hns3_xmit_pkts;
}
+ if (vec_allowed)
+ return hns3_xmit_pkts_vec;
+ if (simple_allowed)
+ return hns3_xmit_pkts_simple;
+
*prep = hns3_prep_pkts;
return hns3_xmit_pkts;
}
eth_tx_prep_t prep = NULL;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
+ __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
+ eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
+ eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
} else {
eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
return 0;
}
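+/*
+ * Free up to free_cnt transmitted mbufs (the whole ring when free_cnt is 0),
+ * walking from next_to_clean and stopping at the first descriptor hardware
+ * has not finished with. Return the number of descriptors cleaned.
+ */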
+static int
+hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
+{
+ uint16_t next_to_clean = txq->next_to_clean;
+ uint16_t next_to_use = txq->next_to_use;
+ uint16_t tx_bd_ready = txq->tx_bd_ready;
+ struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
+ struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
+ uint32_t idx;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ for (idx = 0; idx < free_cnt; idx++) {
+ if (next_to_clean == next_to_use)
+ break;
+
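+		/* Stop at the first descriptor hardware still owns. */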
+ if (desc->tx.tp_fe_sc_vld_ra_ri &
+ rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+ break;
+
+ if (tx_pkt->mbuf != NULL) {
+ rte_pktmbuf_free_seg(tx_pkt->mbuf);
+ tx_pkt->mbuf = NULL;
+ }
+
+ next_to_clean++;
+ tx_bd_ready++;
+ tx_pkt++;
+ desc++;
+ if (next_to_clean == txq->nb_tx_desc) {
+ tx_pkt = txq->sw_ring;
+ desc = txq->tx_ring;
+ next_to_clean = 0;
+ }
+ }
+
+ if (idx > 0) {
+ txq->next_to_clean = next_to_clean;
+ txq->tx_bd_ready = tx_bd_ready;
+ }
+
+ return (int)idx;
+}
+
+int
+hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
+ struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+
+ if (dev->tx_pkt_burst == hns3_xmit_pkts)
+ return hns3_tx_done_cleanup_full(q, free_cnt);
+ else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
+ return 0;
+ else
+ return -ENOTSUP;
+}
+
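+/*
+ * Report the state of the Rx descriptor 'offset' entries beyond the next one
+ * the driver will process, taking the Rx burst function in use into account.
+ */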
+int
+hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *rxdp;
+ struct hns3_rx_queue *rxq;
+ struct rte_eth_dev *dev;
+ uint32_t bd_base_info;
+ uint16_t desc_id;
+
+ rxq = (struct hns3_rx_queue *)rx_queue;
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
+ rxdp = &rxq->rx_ring[desc_id];
+ bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
+ dev = &rte_eth_devices[rxq->port_id];
+ if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+		   dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else {
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+ return RTE_ETH_RX_DESC_AVAIL;
+ else
+ return RTE_ETH_RX_DESC_DONE;
+}
+
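+/*
+ * Report the state of the Tx descriptor 'offset' entries beyond next_to_use:
+ * FULL while hardware still owns it, DONE once it has been transmitted.
+ */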
+int
+hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *txdp;
+ struct hns3_tx_queue *txq;
+ struct rte_eth_dev *dev;
+ uint16_t desc_id;
+
+ txq = (struct hns3_tx_queue *)tx_queue;
+ if (offset >= txq->nb_tx_desc)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[txq->port_id];
+ if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
+ dev->tx_pkt_burst != hns3_xmit_pkts &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec)
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+ desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
+ txdp = &txq->tx_ring[desc_id];
+ if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+ return RTE_ETH_TX_DESC_FULL;
+ else
+ return RTE_ETH_TX_DESC_DONE;
+}
+
uint32_t
hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
else
return fbd_num - driver_hold_bd_num;
}
+
+void
+hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
+{
+ /*
+	 * If the hardware supports the RXD advanced layout, the driver
+	 * enables it by default.
+ */
+ if (hns3_dev_rxd_adv_layout_supported(hw))
+ hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
+}