ethdev: replace callback getting filter operations
[dpdk.git] / drivers / net / hns3 / hns3_rxtx.c
index 00167c4..feeb702 100644
@@ -2689,13 +2689,26 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 {
        struct hns3_adapter *hns = dev->data->dev_private;
        uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
+       bool vec_allowed, sve_allowed, simple_allowed;
+
+       vec_allowed = hns->rx_vec_allowed &&
+                     hns3_rx_check_vec_support(dev) == 0;
+       sve_allowed = vec_allowed && hns3_check_sve_support();
+       simple_allowed = hns->rx_simple_allowed && !dev->data->scattered_rx &&
+                        (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+
+       if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
+               return hns3_recv_pkts_vec;
+       if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+               return hns3_recv_pkts_vec_sve;
+       if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
+               return hns3_recv_pkts;
+       if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
+               return hns3_recv_scattered_pkts;
 
-       if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
-               return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
-                      hns3_recv_pkts_vec;
-
-       if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
-           (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
+       if (vec_allowed)
+               return hns3_recv_pkts_vec;
+       if (simple_allowed)
                return hns3_recv_pkts;
 
        return hns3_recv_scattered_pkts;
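
The rework above adds a per-device I/O function hint: when the hinted burst function is usable it wins, otherwise selection falls back to the previous order (vector, then simple, then the scattered/common path). How the hint is configured is not part of this hunk; assuming it is exposed through the driver's rx_func_hint/tx_func_hint devargs keys, a minimal sketch of passing it from an application could look like this (the key names, PCI address and hint values are placeholders, not taken from this patch):

    /* Minimal sketch: pass the burst-function hint as devargs at EAL init.
     * The rx_func_hint/tx_func_hint key names and the PCI address are
     * assumptions for illustration; they are not defined in this hunk.
     */
    #include <rte_common.h>
    #include <rte_eal.h>

    int
    main(int argc, char **argv)
    {
            char *eal_args[] = {
                    argv[0],
                    "-a", "0000:7d:00.0,rx_func_hint=vec,tx_func_hint=simple",
            };

            (void)argc;
            if (rte_eal_init(RTE_DIM(eal_args), eal_args) < 0)
                    return -1;
            /* ...ethdev configuration; hns3_get_rx_function() then honours
             * the hint when the hinted path is usable...
             */
            return 0;
    }
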
@@ -2954,7 +2967,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
        hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
                           rxm->outer_l2_len + rxm->outer_l3_len : 0;
        paylen = rxm->pkt_len - hdr_len;
-       desc->tx.paylen = rte_cpu_to_le_32(paylen);
+       desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
        hns3_set_tso(desc, paylen, rxm);
 
        /*
@@ -3191,8 +3204,10 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
 {
        struct hns3_desc *tx_ring = txq->tx_ring;
        struct hns3_desc *desc = &tx_ring[tx_desc_id];
+       uint64_t ol_flags = m->ol_flags;
        uint32_t tmp_outer = 0;
        uint32_t tmp_inner = 0;
+       uint32_t tmp_ol4cs;
        int ret;
 
        /*
@@ -3202,7 +3217,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
         * calculations, the length of the L2 header, including the outer and
         * inner, will be filled during the parsing of tunnel packets.
         */
-       if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) {
+       if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
                /*
                 * For non tunnel type the tunnel type id is 0, so no need to
                 * assign a value to it. Only the inner(normal) L2 header length
@@ -3217,7 +3232,8 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
                 * inner l2_len. It would lead to a cksum error. So the driver
                 * has to calculate the header length.
                 */
-               if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+               if (unlikely(!(ol_flags &
+                       (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
                                        m->outer_l2_len == 0)) {
                        struct rte_net_hdr_lens hdr_len;
                        (void)rte_net_get_ptype(m, &hdr_len,
@@ -3234,6 +3250,9 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
 
        desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
        desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
+       tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
+                       BIT(HNS3_TXD_OL4CS_B) : 0;
+       desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
 
        return 0;
 }
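
With the new HNS3_TXD_OL4CS_B handling, the hardware can be asked to fill the outer UDP checksum of a tunnel packet. On the application side this corresponds to the usual mbuf Tx offload metadata; below is a minimal sketch for a VXLAN-in-IPv4 packet, with header lengths used purely as illustration:

    /* Sketch of the mbuf metadata that requests outer UDP checksum offload
     * for a VXLAN-in-IPv4 packet; the header lengths are illustrative.
     */
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_tcp.h>
    #include <rte_udp.h>
    #include <rte_vxlan.h>

    static void
    request_outer_udp_cksum(struct rte_mbuf *m)
    {
            m->outer_l2_len = RTE_ETHER_HDR_LEN;            /* outer Ethernet */
            m->outer_l3_len = sizeof(struct rte_ipv4_hdr);  /* outer IPv4 */
            m->l2_len = sizeof(struct rte_udp_hdr) +
                        sizeof(struct rte_vxlan_hdr) +
                        RTE_ETHER_HDR_LEN;          /* tunnel hdrs + inner Ethernet */
            m->l3_len = sizeof(struct rte_ipv4_hdr);
            m->l4_len = sizeof(struct rte_tcp_hdr);

            m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
                           PKT_TX_OUTER_UDP_CKSUM | PKT_TX_TUNNEL_VXLAN |
                           PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
    }
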
@@ -3363,31 +3382,78 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
        return false;
 }
 
+static bool
+hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+                               uint32_t *l4_proto)
+{
+       struct rte_ipv4_hdr *ipv4_hdr;
+       ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+                                          m->outer_l2_len);
+       if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+               ipv4_hdr->hdr_checksum = 0;
+       if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+               struct rte_udp_hdr *udp_hdr;
+               /*
+                * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
+                * header checksum for TSO packets.
+                */
+               if (ol_flags & PKT_TX_TCP_SEG)
+                       return true;
+               udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+                               m->outer_l2_len + m->outer_l3_len);
+               udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+
+               return true;
+       }
+       *l4_proto = ipv4_hdr->next_proto_id;
+       return false;
+}
+
+static bool
+hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+                               uint32_t *l4_proto)
+{
+       struct rte_ipv6_hdr *ipv6_hdr;
+       ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+                                          m->outer_l2_len);
+       if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+               struct rte_udp_hdr *udp_hdr;
+               /*
+                * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
+                * header checksum for TSO packets.
+                */
+               if (ol_flags & PKT_TX_TCP_SEG)
+                       return true;
+               udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+                               m->outer_l2_len + m->outer_l3_len);
+               udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+
+               return true;
+       }
+       *l4_proto = ipv6_hdr->proto;
+       return false;
+}
+
 static void
 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
 {
        uint64_t ol_flags = m->ol_flags;
        uint32_t paylen, hdr_len, l4_proto;
+       struct rte_udp_hdr *udp_hdr;
 
        if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
                return;
 
        if (ol_flags & PKT_TX_OUTER_IPV4) {
-               struct rte_ipv4_hdr *ipv4_hdr;
-               ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-                                                  m->outer_l2_len);
-               l4_proto = ipv4_hdr->next_proto_id;
-               if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                       ipv4_hdr->hdr_checksum = 0;
+               if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
+                       return;
        } else {
-               struct rte_ipv6_hdr *ipv6_hdr;
-               ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-                                                  m->outer_l2_len);
-               l4_proto = ipv6_hdr->proto;
+               if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
+                       return;
        }
+
        /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
        if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
-               struct rte_udp_hdr *udp_hdr;
                hdr_len = m->l2_len + m->l3_len + m->l4_len;
                hdr_len += m->outer_l2_len + m->outer_l3_len;
                paylen = m->pkt_len - hdr_len;
@@ -3673,7 +3739,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
                dma_addr = rte_mbuf_data_iova(*pkts);
                txdp->addr = rte_cpu_to_le_64(dma_addr);
                txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
-               txdp->tx.paylen = 0;
+               txdp->tx.paylen_fd_dop_ol4cs = 0;
                txdp->tx.type_cs_vlan_tso_len = 0;
                txdp->tx.ol_type_vlan_len_msec = 0;
                txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
@@ -3689,7 +3755,7 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
        dma_addr = rte_mbuf_data_iova(*pkts);
        txdp->addr = rte_cpu_to_le_64(dma_addr);
        txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
-       txdp->tx.paylen = 0;
+       txdp->tx.paylen_fd_dop_ol4cs = 0;
        txdp->tx.type_cs_vlan_tso_len = 0;
        txdp->tx.ol_type_vlan_len_msec = 0;
        txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
@@ -3930,19 +3996,32 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
 {
        uint64_t offloads = dev->data->dev_conf.txmode.offloads;
        struct hns3_adapter *hns = dev->data->dev_private;
+       bool vec_allowed, sve_allowed, simple_allowed;
 
-       if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
-               *prep = NULL;
-               return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
-                       hns3_xmit_pkts_vec;
-       }
+       vec_allowed = hns->tx_vec_allowed &&
+                     hns3_tx_check_vec_support(dev) == 0;
+       sve_allowed = vec_allowed && hns3_check_sve_support();
+       simple_allowed = hns->tx_simple_allowed &&
+                        offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+
+       *prep = NULL;
 
-       if (hns->tx_simple_allowed &&
-           offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
-               *prep = NULL;
+       if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
+               return hns3_xmit_pkts_vec;
+       if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+               return hns3_xmit_pkts_vec_sve;
+       if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
                return hns3_xmit_pkts_simple;
+       if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
+               *prep = hns3_prep_pkts;
+               return hns3_xmit_pkts;
        }
 
+       if (vec_allowed)
+               return hns3_xmit_pkts_vec;
+       if (simple_allowed)
+               return hns3_xmit_pkts_simple;
+
        *prep = hns3_prep_pkts;
        return hns3_xmit_pkts;
 }
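
As with Rx, the hint takes precedence when its prerequisites hold, and *prep is now initialised to NULL up front so that only the common path installs hns3_prep_pkts. An application that always calls rte_eth_tx_prepare() before transmitting keeps working on every path, because a NULL tx_pkt_prepare makes the prepare call a pass-through. A small sketch with placeholder port/queue ids:

    /* Sketch of the prepare + transmit sequence; port_id, queue_id and the
     * burst are placeholders. On the simple/vector Tx paths tx_pkt_prepare
     * is NULL, so rte_eth_tx_prepare() simply returns nb_pkts.
     */
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static uint16_t
    send_burst(uint16_t port_id, uint16_t queue_id,
               struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
                                                  pkts, nb_pkts);

            return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
    }
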
@@ -3963,8 +4042,10 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
        if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
            __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
                eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
+               eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
                eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
                eth_dev->tx_pkt_prepare = prep;
+               eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
        } else {
                eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
                eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
@@ -4177,6 +4258,68 @@ hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
                return -ENOTSUP;
 }
 
+int
+hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       volatile struct hns3_desc *rxdp;
+       struct hns3_rx_queue *rxq;
+       struct rte_eth_dev *dev;
+       uint32_t bd_base_info;
+       uint16_t desc_id;
+
+       rxq = (struct hns3_rx_queue *)rx_queue;
+       if (offset >= rxq->nb_rx_desc)
+               return -EINVAL;
+
+       desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
+       rxdp = &rxq->rx_ring[desc_id];
+       bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
+       dev = &rte_eth_devices[rxq->port_id];
+       if (dev->rx_pkt_burst == hns3_recv_pkts ||
+           dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
+               if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
+                       return RTE_ETH_RX_DESC_UNAVAIL;
+       } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+                  dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+               if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
+                       return RTE_ETH_RX_DESC_UNAVAIL;
+       } else {
+               return RTE_ETH_RX_DESC_UNAVAIL;
+       }
+
+       if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+               return RTE_ETH_RX_DESC_AVAIL;
+       else
+               return RTE_ETH_RX_DESC_DONE;
+}
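
This callback is reached through the generic rte_eth_rx_descriptor_status() API. A small usage sketch that counts how many descriptors the hardware has completed (port/queue ids and ring size are placeholders):

    /* Sketch: count completed Rx descriptors via the generic ethdev API,
     * which now dispatches to hns3_dev_rx_descriptor_status() for hns3
     * ports; port_id, queue_id and ring_size are placeholders.
     */
    #include <rte_ethdev.h>

    static int
    rx_done_count(uint16_t port_id, uint16_t queue_id, uint16_t ring_size)
    {
            uint16_t off;
            int done = 0;

            for (off = 0; off < ring_size; off++) {
                    if (rte_eth_rx_descriptor_status(port_id, queue_id, off) !=
                        RTE_ETH_RX_DESC_DONE)
                            break;
                    done++;
            }
            return done;
    }
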
+
+int
+hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       volatile struct hns3_desc *txdp;
+       struct hns3_tx_queue *txq;
+       struct rte_eth_dev *dev;
+       uint16_t desc_id;
+
+       txq = (struct hns3_tx_queue *)tx_queue;
+       if (offset >= txq->nb_tx_desc)
+               return -EINVAL;
+
+       dev = &rte_eth_devices[txq->port_id];
+       if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
+           dev->tx_pkt_burst != hns3_xmit_pkts &&
+           dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
+           dev->tx_pkt_burst != hns3_xmit_pkts_vec)
+               return RTE_ETH_TX_DESC_UNAVAIL;
+
+       desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
+       txdp = &txq->tx_ring[desc_id];
+       if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+               return RTE_ETH_TX_DESC_FULL;
+       else
+               return RTE_ETH_TX_DESC_DONE;
+}
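
The Tx counterpart can be used to back off when a slot a given number of descriptors ahead is still owned by hardware; a minimal sketch with placeholder ids:

    /* Sketch: treat a Tx slot as free unless the generic API reports it is
     * still held by hardware (RTE_ETH_TX_DESC_FULL); placeholders as above.
     */
    #include <stdbool.h>

    #include <rte_ethdev.h>

    static bool
    tx_slot_free(uint16_t port_id, uint16_t queue_id, uint16_t offset)
    {
            return rte_eth_tx_descriptor_status(port_id, queue_id, offset) !=
                   RTE_ETH_TX_DESC_FULL;
    }
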
+
 uint32_t
 hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {