X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx.c;h=222cf8a4bfcbeb68635e1c8cf9fd61d2e6ed2852;hb=e5e9ef729c8815e6eb0afbece4cc67e6500e7477;hp=33864535b637d41735de5697c5fb08854b5337e7;hpb=708ecc07d23edff628008669401cfaf37a503ce8;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 33864535b6..222cf8a4bf 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2,28 +2,15 @@
  * Copyright(c) 2018-2019 Hisilicon Limited.
  */
 
-#include
-#include
-#include
-#include
-#include
-#include
 #include
-#include
 #include
 #include
-#include
-#include
-#include
 #include
-#include
+#include
 #include
-#include
-#include
 #include
 #include
-#include
 
-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
 #include
 #endif
@@ -269,7 +256,7 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (unlikely(mbuf == NULL)) {
-			hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
+			hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
 				 i);
 			hns3_rx_queue_release_mbufs(rxq);
 			return -ENOMEM;
@@ -366,6 +353,19 @@ hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
 	}
 }
 
+static void
+hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
+{
+	uint32_t reg_offset;
+	uint32_t reg;
+
+	reg_offset = queue_type == HNS3_RING_TYPE_TX ?
+		     HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
+	reg = hns3_read_reg(tqp_base, reg_offset);
+	reg &= ~BIT(HNS3_RING_EN_B);
+	hns3_write_reg(tqp_base, reg_offset, reg);
+}
+
 void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
@@ -381,16 +381,22 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 		if (hns3_dev_indep_txrx_supported(hw)) {
 			rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
 			txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+
+			tqp_base = (void *)((char *)hw->io_base +
+					    hns3_get_tqp_reg_offset(i));
 			/*
-			 * After initialization, rxq and txq won't be NULL at
-			 * the same time.
+			 * If queue struct is not initialized, it means the
+			 * related HW ring has not been initialized yet.
+			 * So, these queues should be disabled before enable
+			 * the tqps to avoid a HW exception since the queues
+			 * are enabled by default.
 			 */
-			if (rxq != NULL)
-				tqp_base = rxq->io_base;
-			else if (txq != NULL)
-				tqp_base = txq->io_base;
-			else
-				return;
+			if (rxq == NULL)
+				hns3_stop_unused_queue(tqp_base,
+						       HNS3_RING_TYPE_RX);
+			if (txq == NULL)
+				hns3_stop_unused_queue(tqp_base,
+						       HNS3_RING_TYPE_TX);
 		} else {
 			rxq = i < nb_rx_q ?
 			      hw->data->rx_queues[i] : hw->fkq_data.rx_queues[i - nb_rx_q];
@@ -515,6 +521,26 @@ start_rxqs_fail:
 	return -EINVAL;
 }
 
+void
+hns3_restore_tqp_enable_state(struct hns3_hw *hw)
+{
+	struct hns3_rx_queue *rxq;
+	struct hns3_tx_queue *txq;
+	uint16_t i;
+
+	for (i = 0; i < hw->data->nb_rx_queues; i++) {
+		rxq = hw->data->rx_queues[i];
+		if (rxq != NULL)
+			hns3_enable_rxq(rxq, rxq->enabled);
+	}
+
+	for (i = 0; i < hw->data->nb_tx_queues; i++) {
+		txq = hw->data->tx_queues[i];
+		if (txq != NULL)
+			hns3_enable_txq(txq, txq->enabled);
+	}
+}
+
 void
 hns3_stop_all_txqs(struct rte_eth_dev *dev)
 {
@@ -808,6 +834,24 @@
 queue_reset_fail:
 	return ret;
 }
 
+uint32_t
+hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
+{
+	uint32_t reg_offset;
+
+	/* Need an extend offset to config queues > 64 */
+	if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
+		reg_offset = HNS3_TQP_INTR_REG_BASE +
+			     tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+	else
+		reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
+			     tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
+			     HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
+			     tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
+			     HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+
+	return reg_offset;
+}
 void
 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
@@ -821,7 +865,7 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
 	if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
 		return;
 
-	addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
 	if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
 		value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
 	else
@@ -838,7 +882,7 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
 	if (rl_value > HNS3_TQP_INTR_RL_MAX)
 		return;
 
-	addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
 	value = HNS3_RL_USEC_TO_REG(rl_value);
 	if (value > 0)
 		value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
@@ -851,13 +895,18 @@ hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
 {
 	uint32_t addr;
 
-	if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+	/*
+	 * int_ql_max == 0 means the hardware does not support QL,
+	 * QL regs config is not permitted if QL is not supported,
+	 * here just return.
+	 */
+	if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
 		return;
 
-	addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
 	hns3_write_dev(hw, addr, ql_value);
 
-	addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
 	hns3_write_dev(hw, addr, ql_value);
 }
 
@@ -866,7 +915,7 @@ hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
 {
 	uint32_t addr, value;
 
-	addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
 	value = en ? 1 : 0;
 
 	hns3_write_dev(hw, addr, value);
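Note on the interrupt-register hunks above: hns3_get_tqp_intr_reg_offset() replaces the old flat "queue_id * HNS3_TQP_INTR_REG_SIZE" addressing so that interrupt vectors beyond the first HNS3_MIN_EXT_TQP_INTR_ID (64, per the "> 64" comment) are reached through an extended register window. The standalone sketch below mirrors only the shape of that mapping; the bases and strides are hypothetical placeholders, since the real constants live in hns3_regs.h and are not part of this diff.

#include <stdint.h>
#include <stdio.h>

/* All values below are illustrative placeholders, not the real hns3 registers. */
#define MIN_EXT_TQP_INTR_ID     64U      /* matches the "> 64" comment in the hunk */
#define TQP_INTR_REG_BASE       0x20000U /* hypothetical legacy window base */
#define TQP_INTR_EXT_REG_BASE   0x30000U /* hypothetical extended window base */
#define TQP_INTR_LOW_ORDER_OFF  0x4U     /* hypothetical per-vector stride */
#define TQP_INTR_HIGH_ORDER_OFF 0x1000U  /* hypothetical per-group stride */

static uint32_t
tqp_intr_reg_offset(uint16_t tqp_intr_id)
{
	/* first 64 vectors: flat addressing in the legacy window */
	if (tqp_intr_id < MIN_EXT_TQP_INTR_ID)
		return TQP_INTR_REG_BASE + tqp_intr_id * TQP_INTR_LOW_ORDER_OFF;

	/* later vectors: per-group stride in the extended window plus the in-group stride */
	return TQP_INTR_EXT_REG_BASE +
	       tqp_intr_id / MIN_EXT_TQP_INTR_ID * TQP_INTR_HIGH_ORDER_OFF +
	       tqp_intr_id % MIN_EXT_TQP_INTR_ID * TQP_INTR_LOW_ORDER_OFF;
}

int
main(void)
{
	/* vector 3 stays in the legacy window; vector 100 needs the extended one */
	printf("id 3   -> 0x%x\n", (unsigned int)tqp_intr_reg_offset(3));
	printf("id 100 -> 0x%x\n", (unsigned int)tqp_intr_reg_offset(100));
	return 0;
}

The GL, RL, QL and CTRL register writes in the hunks above all funnel through this one helper, which is why each of them swaps the multiplication for a call to it.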
@@ -1200,7 +1249,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
 	rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
 				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
 	if (rxq == NULL) {
-		hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+		hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
 			 q_info->idx);
 		return NULL;
 	}
@@ -1219,7 +1268,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
 					 rx_desc, HNS3_RING_BASE_ALIGN,
 					 q_info->socket_id);
 	if (rx_mz == NULL) {
-		hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+		hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
 			 q_info->idx);
 		hns3_rx_queue_release(rxq);
 		return NULL;
@@ -1228,7 +1277,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
 	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
 	rxq->rx_ring_phys_addr = rx_mz->iova;
 
-	hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+	hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
 		 rxq->rx_ring_phys_addr);
 
 	return rxq;
@@ -1256,7 +1305,7 @@ hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 	q_info.ring_name = "rx_fake_ring";
 	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
 	if (rxq == NULL) {
-		hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+		hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
 		return -ENOMEM;
 	}
 
@@ -1293,7 +1342,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
 	txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
 				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
 	if (txq == NULL) {
-		hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+		hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
 			 q_info->idx);
 		return NULL;
 	}
@@ -1306,7 +1355,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
 					 tx_desc, HNS3_RING_BASE_ALIGN,
 					 q_info->socket_id);
 	if (tx_mz == NULL) {
-		hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+		hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
 			 q_info->idx);
 		hns3_tx_queue_release(txq);
 		return NULL;
@@ -1315,7 +1364,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
 	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
 	txq->tx_ring_phys_addr = tx_mz->iova;
 
-	hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+	hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
 		 txq->tx_ring_phys_addr);
 
 	/* Clear tx bd */
@@ -1350,7 +1399,7 @@ hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 	q_info.ring_name = "tx_fake_ring";
 	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
 	if (txq == NULL) {
-		hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+		hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
 		return -ENOMEM;
 	}
 
@@ -1572,7 +1621,6 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
 
 	vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
 			RTE_PKTMBUF_HEADROOM);
-
 	if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
 		return -EINVAL;
 
@@ -1762,12 +1810,9 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
 			   HNS3_RING_RX_HEAD_REG);
 	rxq->rx_buf_len = rx_buf_size;
-	rxq->l2_errors = 0;
-	rxq->pkt_len_errors = 0;
-	rxq->l3_csum_errors = 0;
-	rxq->l4_csum_errors = 0;
-	rxq->ol3_csum_errors = 0;
-	rxq->ol4_csum_errors = 0;
+	memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
+	memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
+	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* CRC len set here is used for amending packet length */
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
@@ -2437,7 +2482,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 static bool
 hns3_check_sve_support(void)
 {
-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
 		return true;
 #endif
@@ -2483,8 +2528,8 @@ hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
 	if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
 	    rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
 	    free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
-		hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
-			 "(%d) of tx descriptors for port=%d queue=%d check "
+		hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
+			 "(%u) of tx descriptors for port=%u queue=%u check "
 			 "fail!",
 			 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
 			 idx);
@@ -2592,12 +2637,9 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 					     HNS3_RING_TX_TAIL_REG);
 	txq->min_tx_pkt_len = hw->min_tx_pkt_len;
 	txq->tso_mode = hw->tso_mode;
-	txq->over_length_pkt_cnt = 0;
-	txq->exceed_limit_bd_pkt_cnt = 0;
-	txq->exceed_limit_bd_reassem_fail = 0;
-	txq->unsupported_tunnel_pkt_cnt = 0;
-	txq->queue_full_cnt = 0;
-	txq->pkt_padding_fail_cnt = 0;
+	memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
+	memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+
 	rte_spinlock_lock(&hw->lock);
 	dev->data->tx_queues[idx] = txq;
 	rte_spinlock_unlock(&hw->lock);
@@ -2641,44 +2683,6 @@ hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
 	txq->tx_bd_ready = tx_bd_ready;
 }
 
-static int
-hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
-		     struct rte_mbuf *rxm, uint8_t *l2_len)
-{
-	uint64_t tun_flags;
-	uint8_t ol4_len;
-	uint32_t otmp;
-
-	tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
-	if (tun_flags == 0)
-		return 0;
-
-	otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
-	switch (tun_flags) {
-	case PKT_TX_TUNNEL_GENEVE:
-	case PKT_TX_TUNNEL_VXLAN:
-		*l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
-		break;
-	case PKT_TX_TUNNEL_GRE:
-		/*
-		 * OL4 header size, defined in 4 Bytes, it contains outer
-		 * L4(GRE) length and tunneling length.
-		 */
-		ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
-					 HNS3_TXD_L4LEN_S);
-		*l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
-		break;
-	default:
-		/* For non UDP / GRE tunneling, drop the tunnel packet */
-		return -EINVAL;
-	}
-	hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
-	desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
-
-	return 0;
-}
-
 int
 hns3_config_gro(struct hns3_hw *hw, bool en)
 {
@@ -2723,31 +2727,15 @@ hns3_pkt_is_tso(struct rte_mbuf *m)
 }
 
 static void
-hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
-	     uint32_t paylen, struct rte_mbuf *rxm)
+hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
 {
-	uint8_t l2_len = rxm->l2_len;
-	uint32_t tmp;
-
 	if (!hns3_pkt_is_tso(rxm))
 		return;
 
-	if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
-		return;
-
 	if (paylen <= rxm->tso_segsz)
 		return;
 
-	tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
-	hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
-	hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
-	hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
-	hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-	hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-		       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
-	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       l2_len >> HNS3_L2_LEN_UNIT);
-	desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
+	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
 	desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
 }
 
@@ -2772,7 +2760,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
 		   rxm->outer_l2_len + rxm->outer_l3_len : 0;
 	paylen = rxm->pkt_len - hdr_len;
 	desc->tx.paylen = rte_cpu_to_le_32(paylen);
-	hns3_set_tso(desc, ol_flags, paylen, rxm);
+	hns3_set_tso(desc, paylen, rxm);
 
 	/*
 	 * Currently, hardware doesn't support more than two layers VLAN offload
@@ -2917,180 +2905,213 @@
 }
 
 static void
-hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
+hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
 {
 	uint32_t tmp = *ol_type_vlan_len_msec;
+	uint64_t ol_flags = m->ol_flags;
 
 	/* (outer) IP header type */
 	if (ol_flags & PKT_TX_OUTER_IPV4) {
-		/* OL3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
 		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-			hns3_set_field(tmp, HNS3_TXD_OL3T_M,
-				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
+			tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
 		else
-			hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-				       HNS3_OL3T_IPV4_NO_CSUM);
+			tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+				HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
 	} else if (ol_flags & PKT_TX_OUTER_IPV6) {
-		hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-			       HNS3_OL3T_IPV6);
-		/* OL3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
+		tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
+					HNS3_OL3T_IPV6);
 	}
-
+	/* OL3 header size, defined in 4 bytes */
+	tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+				m->outer_l3_len >> HNS3_L3_LEN_UNIT);
 	*ol_type_vlan_len_msec = tmp;
 }
 
 static int
-hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
-			struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
+			uint32_t *type_cs_vlan_tso_len)
 {
-	uint32_t tmp = *ol_type_vlan_len_msec;
-	uint8_t l4_len;
-
-	/* OL2 header size, defined in 2 bytes */
-	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
+#define HNS3_NVGRE_HLEN 8
+	uint32_t tmp_outer = *ol_type_vlan_len_msec;
+	uint32_t tmp_inner = *type_cs_vlan_tso_len;
+	uint64_t ol_flags = m->ol_flags;
+	uint16_t inner_l2_len;
 
-	/* L4TUNT: L4 Tunneling Type */
 	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+	case PKT_TX_TUNNEL_VXLAN_GPE:
 	case PKT_TX_TUNNEL_GENEVE:
 	case PKT_TX_TUNNEL_VXLAN:
-		/* MAC in UDP tunnelling packet, include VxLAN */
-		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
-			       HNS3_TUN_MAC_IN_UDP);
+		/* MAC in UDP tunnelling packet, include VxLAN and GENEVE */
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+				HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
 		/*
-		 * OL4 header size, defined in 4 Bytes, it contains outer
-		 * L4(UDP) length and tunneling length.
+		 * The inner l2 length of mbuf is the sum of outer l4 length,
+		 * tunneling header length and inner l2 length for a tunnel
+		 * packect. But in hns3 tx descriptor, the tunneling header
+		 * length is contained in the field of outer L4 length.
+		 * Therefore, driver need to calculate the outer L4 length and
+		 * inner L2 length.
 		 */
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       (uint8_t)RTE_ETHER_VXLAN_HLEN >>
-			       HNS3_L4_LEN_UNIT);
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+				HNS3_TXD_L4LEN_S,
+				(uint8_t)RTE_ETHER_VXLAN_HLEN >>
+				HNS3_L4_LEN_UNIT);
+
+		inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
 		break;
 	case PKT_TX_TUNNEL_GRE:
-		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
-			       HNS3_TUN_NVGRE);
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+				HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
 		/*
-		 * OL4 header size, defined in 4 Bytes, it contains outer
-		 * L4(GRE) length and tunneling length.
+		 * For NVGRE tunnel packect, the outer L4 is empty. So only
+		 * fill the NVGRE header length to the outer L4 field.
 		 */
-		l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       l4_len >> HNS3_L4_LEN_UNIT);
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+				HNS3_TXD_L4LEN_S,
+				(uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
+
+		inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
 		break;
 	default:
 		/* For non UDP / GRE tunneling, drop the tunnel packet */
 		return -EINVAL;
 	}
 
-	*ol_type_vlan_len_msec = tmp;
+	tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+					inner_l2_len >> HNS3_L2_LEN_UNIT);
+	/* OL2 header size, defined in 2 bytes */
+	tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+					m->outer_l2_len >> HNS3_L2_LEN_UNIT);
+
+	*type_cs_vlan_tso_len = tmp_inner;
+	*ol_type_vlan_len_msec = tmp_outer;
 
 	return 0;
 }
 
 static int
-hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
-			    uint64_t ol_flags,
-			    struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+			    uint16_t tx_desc_id)
 {
 	struct hns3_desc *tx_ring = txq->tx_ring;
 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
-	uint32_t value = 0;
+	uint32_t tmp_outer = 0;
+	uint32_t tmp_inner = 0;
 	int ret;
 
-	hns3_parse_outer_params(ol_flags, &value);
-	ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
-	if (ret)
-		return -EINVAL;
+	/*
+	 * The tunnel header is contained in the inner L2 header field of the
+	 * mbuf, but for hns3 descriptor, it is contained in the outer L4. So,
+	 * there is a need that switching between them. To avoid multiple
+	 * calculations, the length of the L2 header include the outer and
+	 * inner, will be filled during the parsing of tunnel packects.
+	 */
+	if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) {
+		/*
+		 * For non tunnel type the tunnel type id is 0, so no need to
+		 * assign a value to it. Only the inner(normal) L2 header length
+		 * is assigned.
+		 */
+		tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
+			   HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
+	} else {
+		/*
+		 * If outer csum is not offload, the outer length may be filled
+		 * with 0. And the length of the outer header is added to the
+		 * inner l2_len. It would lead a cksum error. So driver has to
+		 * calculate the header length.
+		 */
+		if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+					m->outer_l2_len == 0)) {
+			struct rte_net_hdr_lens hdr_len;
+			(void)rte_net_get_ptype(m, &hdr_len,
+					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+			m->outer_l3_len = hdr_len.l3_len;
+			m->outer_l2_len = hdr_len.l2_len;
+			m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
+		}
+		hns3_parse_outer_params(m, &tmp_outer);
+		ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
+		if (ret)
+			return -EINVAL;
+	}
 
-	desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
+	desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
+	desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
 
 	return 0;
 }
 
 static void
-hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
 {
+	uint64_t ol_flags = m->ol_flags;
+	uint32_t l3_type;
 	uint32_t tmp;
 
+	tmp = *type_cs_vlan_tso_len;
+	if (ol_flags & PKT_TX_IPV4)
+		l3_type = HNS3_L3T_IPV4;
+	else if (ol_flags & PKT_TX_IPV6)
+		l3_type = HNS3_L3T_IPV6;
+	else
		l3_type = HNS3_L3T_NONE;
+
+	/* inner(/normal) L3 header size, defined in 4 bytes */
+	tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+					m->l3_len >> HNS3_L3_LEN_UNIT);
+
+	tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
+
 	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IPV4) {
-		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
-			       HNS3_L3T_IPV4);
-		/* inner(/normal) L3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
-		if (ol_flags & PKT_TX_IP_CKSUM)
-			hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
-		*type_cs_vlan_tso_len = tmp;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		tmp = *type_cs_vlan_tso_len;
-		/* L3T, IPv6 don't do checksum */
-		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
-			       HNS3_L3T_IPV6);
-		/* inner(/normal) L3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
-	}
+	if (ol_flags & PKT_TX_IP_CKSUM)
+		tmp |= BIT(HNS3_TXD_L3CS_B);
+	*type_cs_vlan_tso_len = tmp;
 }
 
 static void
-hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
 {
+	uint64_t ol_flags = m->ol_flags;
 	uint32_t tmp;
 
-	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
 	case PKT_TX_TCP_CKSUM:
+	case PKT_TX_TCP_SEG:
 		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
-			       HNS3_L4T_TCP);
-		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
+		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+					HNS3_L4T_TCP);
 		break;
 	case PKT_TX_UDP_CKSUM:
 		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
-			       HNS3_L4T_UDP);
-		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
+		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
					HNS3_L4T_UDP);
 		break;
 	case PKT_TX_SCTP_CKSUM:
 		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
-			       HNS3_L4T_SCTP);
-		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
+		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+					HNS3_L4T_SCTP);
 		break;
 	default:
-		break;
+		return;
 	}
+	tmp |= BIT(HNS3_TXD_L4CS_B);
+	tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					m->l4_len >> HNS3_L4_LEN_UNIT);
+	*type_cs_vlan_tso_len = tmp;
 }
 
 static void
-hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
-			 uint64_t ol_flags)
+hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+			 uint16_t tx_desc_id)
 {
 	struct hns3_desc *tx_ring = txq->tx_ring;
 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
 	uint32_t value = 0;
 
-	/* inner(/normal) L2 header size, defined in 2 bytes */
-	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
-
-	hns3_parse_l3_cksum_params(ol_flags, &value);
-	hns3_parse_l4_cksum_params(ol_flags, &value);
+	hns3_parse_l3_cksum_params(m, &value);
+	hns3_parse_l4_cksum_params(m, &value);
 
 	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
 }
@@ -3151,26 +3172,29 @@
 static void
 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
 {
 	uint64_t ol_flags = m->ol_flags;
-	struct rte_ipv4_hdr *ipv4_hdr;
-	struct rte_udp_hdr *udp_hdr;
-	uint32_t paylen, hdr_len;
+	uint32_t paylen, hdr_len, l4_proto;
 
 	if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
 		return;
 
-	if (ol_flags & PKT_TX_IPV4) {
+	if (ol_flags & PKT_TX_OUTER_IPV4) {
+		struct rte_ipv4_hdr *ipv4_hdr;
 		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
 						   m->outer_l2_len);
-
-		if (ol_flags & PKT_TX_IP_CKSUM)
+		l4_proto = ipv4_hdr->next_proto_id;
+		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 			ipv4_hdr->hdr_checksum = 0;
+	} else {
+		struct rte_ipv6_hdr *ipv6_hdr;
+		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+						   m->outer_l2_len);
+		l4_proto = ipv6_hdr->proto;
 	}
-
-	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
-	    ol_flags & PKT_TX_TCP_SEG) {
+	/* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
+	if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
+		struct rte_udp_hdr *udp_hdr;
 		hdr_len = m->l2_len + m->l3_len + m->l4_len;
-		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
-				m->outer_l2_len + m->outer_l3_len : 0;
+		hdr_len += m->outer_l2_len + m->outer_l3_len;
 		paylen = m->pkt_len - hdr_len;
 		if (paylen <= m->tso_segsz)
 			return;
@@ -3329,20 +3353,25 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 
 static int
 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
-		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
-{
-	/* Fill in tunneling parameters if necessary */
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
-		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
-		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
-						hdr_lens)) {
-			txq->unsupported_tunnel_pkt_cnt++;
-			return -EINVAL;
+		 struct rte_mbuf *m)
+{
+	struct hns3_desc *tx_ring = txq->tx_ring;
+	struct hns3_desc *desc = &tx_ring[tx_desc_id];
+
+	/* Enable checksum offloading */
+	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
+		/* Fill in tunneling parameters if necessary */
+		if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
+			txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
+			return -EINVAL;
 		}
+
+		hns3_txd_enable_checksum(txq, m, tx_desc_id);
+	} else {
+		/* clear the control bit */
+		desc->tx.type_cs_vlan_tso_len = 0;
+		desc->tx.ol_type_vlan_len_msec = 0;
 	}
-	/* Enable checksum offloading */
-	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
-		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
 
 	return 0;
 }
@@ -3363,17 +3392,17 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
 	 * driver support, the packet will be ignored.
 	 */
 	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
-		txq->over_length_pkt_cnt++;
+		txq->dfx_stats.over_length_pkt_cnt++;
 		return -EINVAL;
 	}
 
 	max_non_tso_bd_num = txq->max_non_tso_bd_num;
 	if (unlikely(nb_buf > max_non_tso_bd_num)) {
-		txq->exceed_limit_bd_pkt_cnt++;
+		txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
 		ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
 					      max_non_tso_bd_num);
 		if (ret) {
-			txq->exceed_limit_bd_reassem_fail++;
+			txq->dfx_stats.exceed_limit_bd_reassem_fail++;
 			return ret;
 		}
 		*m_seg = new_pkt;
@@ -3511,7 +3540,7 @@ hns3_xmit_pkts_simple(void *tx_queue,
 	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
 	if (unlikely(nb_pkts == 0)) {
 		if (txq->tx_bd_ready == 0)
-			txq->queue_full_cnt++;
+			txq->dfx_stats.queue_full_cnt++;
 		return 0;
 	}
 
@@ -3533,7 +3562,6 @@ hns3_xmit_pkts_simple(void *tx_queue,
 uint16_t
 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct rte_net_hdr_lens hdr_lens = {0};
 	struct hns3_tx_queue *txq = tx_queue;
 	struct hns3_entry *tx_bak_pkt;
 	struct hns3_desc *tx_ring;
@@ -3564,7 +3592,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		nb_buf = tx_pkt->nb_segs;
 
 		if (nb_buf > txq->tx_bd_ready) {
-			txq->queue_full_cnt++;
+			txq->dfx_stats.queue_full_cnt++;
 			if (nb_tx == 0)
 				return 0;
 
@@ -3585,7 +3613,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 					     rte_pktmbuf_pkt_len(tx_pkt);
 			appended = rte_pktmbuf_append(tx_pkt, add_len);
 			if (appended == NULL) {
-				txq->pkt_padding_fail_cnt++;
+				txq->dfx_stats.pkt_padding_fail_cnt++;
 				break;
 			}
 
@@ -3597,7 +3625,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
 			goto end_of_tx;
 
-		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
+		if (hns3_parse_cksum(txq, tx_next_use, m_seg))
 			goto end_of_tx;
 
 		i = 0;
@@ -3728,7 +3756,7 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 	eth_tx_prep_t prep = NULL;
 
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
-	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
+	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
 		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
 		eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
 		eth_dev->tx_pkt_prepare = prep;
@@ -3805,6 +3833,19 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return ret;
 }
 
+static void
+hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
+{
+	rxq->next_to_use = 0;
+	rxq->rx_rearm_start = 0;
+	rxq->rx_free_hold = 0;
+	rxq->rx_rearm_nb = 0;
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+	memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
+	hns3_rxq_vec_setup(rxq);
+}
+
 int
 hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
@@ -3815,7 +3856,10 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		return -ENOTSUP;
 
 	hns3_enable_rxq(rxq, false);
+
 	hns3_rx_queue_release_mbufs(rxq);
+
+	hns3_reset_sw_rxq(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return 0;
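Worked example for the tunnel-length handling in the hns3_parse_inner_params()/hns3_parse_tunneling_params() hunks above, under the mbuf convention that, for tunnel offloads, l2_len covers the outer L4 header plus the tunnel header plus the inner L2 header. As the patch comments state, descriptor L4-length fields are written in 4-byte units and L2-length fields in 2-byte units; the 14-byte inner Ethernet header and the tiny program below are illustrative only, not driver code.

#include <stdio.h>

#define VXLAN_HLEN 16U /* outer UDP (8) + VXLAN (8); RTE_ETHER_VXLAN_HLEN in DPDK */
#define NVGRE_HLEN 8U  /* HNS3_NVGRE_HLEN from the hunk above */

int
main(void)
{
	/* VXLAN TSO packet with a 14-byte inner Ethernet header (illustrative) */
	unsigned int l2_len = VXLAN_HLEN + 14; /* what the application stores in mbuf->l2_len */

	/* descriptor fields: outer L4 length in 4-byte units, inner L2 length in 2-byte units */
	printf("VXLAN: outer L4 field = %u, inner L2 field = %u\n",
	       VXLAN_HLEN >> 2, (l2_len - VXLAN_HLEN) >> 1);

	/* NVGRE: the outer L4 header is empty, so only the 8-byte GRE header is counted */
	l2_len = NVGRE_HLEN + 14;
	printf("NVGRE: outer L4 field = %u, inner L2 field = %u\n",
	       NVGRE_HLEN >> 2, (l2_len - NVGRE_HLEN) >> 1);

	return 0;
}

This is why the patch can drop hns3_tso_proc_tunnel() and the rte_net_get_ptype() call on the fast path: once the outer L4/tunnel portion is split out of l2_len arithmetically, the descriptor length fields follow directly from the mbuf metadata.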