diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 59623ac..cb77987 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -230,8 +230,7 @@ reset_rx_queue(struct iavf_rx_queue *rxq)
        rxq->rx_tail = 0;
        rxq->nb_rx_hold = 0;
 
-       if (rxq->pkt_first_seg != NULL)
-               rte_pktmbuf_free(rxq->pkt_first_seg);
+       rte_pktmbuf_free(rxq->pkt_first_seg);
 
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
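The hunk above drops the NULL guard because rte_pktmbuf_free() is documented to accept NULL and treat it as a no-op, just like the standard free(). A minimal sketch of the same idiom using free() (not the driver's code):

#include <stdlib.h>

int main(void)
{
	char *p = NULL;

	/* free(NULL) is defined as a no-op, so no guard is needed;
	 * rte_pktmbuf_free() gives the same guarantee for mbufs.
	 */
	free(p);
	return 0;
}
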
@@ -476,54 +475,56 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
 #endif
 }
 
+static const
+iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+       [IAVF_RXDID_COMMS_AUX_VLAN] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+       [IAVF_RXDID_COMMS_AUX_IPV4] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+       [IAVF_RXDID_COMMS_AUX_IPV6] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+       [IAVF_RXDID_COMMS_AUX_IPV6_FLOW] =
+               iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+       [IAVF_RXDID_COMMS_AUX_TCP] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+       [IAVF_RXDID_COMMS_AUX_IP_OFFSET] =
+               iavf_rxd_to_pkt_fields_by_comms_aux_v2,
+       [IAVF_RXDID_COMMS_IPSEC_CRYPTO] =
+               iavf_rxd_to_pkt_fields_by_comms_aux_v2,
+       [IAVF_RXDID_COMMS_OVS_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+};
+
 static void
 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 {
+       rxq->rxdid = rxdid;
+
        switch (rxdid) {
        case IAVF_RXDID_COMMS_AUX_VLAN:
                rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
-               rxq->rxd_to_pkt_fields =
-                       iavf_rxd_to_pkt_fields_by_comms_aux_v1;
                break;
        case IAVF_RXDID_COMMS_AUX_IPV4:
                rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
-               rxq->rxd_to_pkt_fields =
-                       iavf_rxd_to_pkt_fields_by_comms_aux_v1;
                break;
        case IAVF_RXDID_COMMS_AUX_IPV6:
                rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
-               rxq->rxd_to_pkt_fields =
-                       iavf_rxd_to_pkt_fields_by_comms_aux_v1;
                break;
        case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
                rxq->xtr_ol_flag =
                        rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
-               rxq->rxd_to_pkt_fields =
-                       iavf_rxd_to_pkt_fields_by_comms_aux_v1;
                break;
        case IAVF_RXDID_COMMS_AUX_TCP:
                rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
-               rxq->rxd_to_pkt_fields =
-                       iavf_rxd_to_pkt_fields_by_comms_aux_v1;
                break;
        case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
                rxq->xtr_ol_flag =
                        rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
-               rxq->rxd_to_pkt_fields =
-                       iavf_rxd_to_pkt_fields_by_comms_aux_v2;
                break;
        case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
                rxq->xtr_ol_flag =
                        rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
-               rxq->rxd_to_pkt_fields =
-                       iavf_rxd_to_pkt_fields_by_comms_aux_v2;
                break;
        case IAVF_RXDID_COMMS_OVS_1:
-               rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
                break;
        default:
                /* update this according to the RXDID for FLEX_DESC_NONE */
-               rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+               rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
                break;
        }
 
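The hunk above replaces the per-queue rxd_to_pkt_fields function pointer with a single read-only table, rxd_to_pkt_fields_ops, indexed by the flexible descriptor ID; each queue now stores only its validated rxdid, with unknown IDs coerced to IAVF_RXDID_COMMS_OVS_1. A minimal standalone sketch of this designated-initializer dispatch pattern (the enum values and handlers are illustrative placeholders, not the driver's):

#include <stdio.h>

/* Placeholder descriptor IDs standing in for the IAVF_RXDID_* values. */
enum rxdid { RXDID_V1, RXDID_V2, RXDID_OVS, RXDID_MAX };

typedef void (*handler_t)(int desc);

static void handle_v1(int desc) { printf("v1 handler, desc %d\n", desc); }
static void handle_v2(int desc) { printf("v2 handler, desc %d\n", desc); }

/* One shared, read-only table replaces a function pointer per queue. */
static const handler_t ops[RXDID_MAX] = {
	[RXDID_V1]  = handle_v1,
	[RXDID_V2]  = handle_v2,
	[RXDID_OVS] = handle_v1,
};

int main(void)
{
	enum rxdid id = RXDID_V2;	/* validated once at queue setup */

	ops[id](42);	/* hot path: one indexed load, then the call */
	return 0;
}

Validating the ID once at setup keeps the hot-path lookup unconditional, and sharing one const table across all queues shrinks the per-queue state.
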
@@ -1484,7 +1485,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
                iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
                iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
                                &rxq->stats.ipsec_crypto);
-               rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+               rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
                pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
                rxm->ol_flags |= pkt_flags;
 
@@ -1628,7 +1629,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
                iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
                iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
                                &rxq->stats.ipsec_crypto);
-               rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+               rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
                pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
                first_seg->ol_flags |= pkt_flags;
@@ -1819,7 +1820,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
        struct rte_mbuf *mb;
        uint16_t stat_err0;
        uint16_t pkt_len;
-       int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+       int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
        int32_t i, j, nb_rx = 0;
        uint64_t pkt_flags;
        const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1844,9 +1845,27 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
                rte_smp_rmb();
 
-               /* Compute how many status bits were set */
-               for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
-                       nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+               /* Compute how many contiguous DD bits were set */
+               for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
+                       var = s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+#ifdef RTE_ARCH_ARM
+                       /* On Arm platforms, count only contiguous descriptors
+                        * whose DD bit is set to 1, because reads of the
+                        * descriptors can be reordered. Since the CPU may
+                        * be reading the descriptors as the NIC updates them
+                        * in memory, it is possible that the DD bit for a
+                        * descriptor earlier in the queue is read as not set
+                        * while the DD bit for a descriptor later in the
+                        * queue is read as set.
+                        */
+                       if (var)
+                               nb_dd += 1;
+                       else
+                               break;
+#else
+                       nb_dd += var;
+#endif
+               }
 
                nb_rx += nb_dd;
 
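The loop above stops at the first descriptor whose DD bit is clear instead of summing every set bit. On a weakly ordered CPU this matters: a reordered read can observe a later descriptor's DD bit before an earlier one's, and a plain sum would then hand the application a descriptor the NIC has not finished writing back. A standalone sketch of the contiguous count (the DD bit position and sample values are illustrative):

#include <stdio.h>

#define LOOK_AHEAD 4	/* stands in for IAVF_LOOK_AHEAD */

/* Count only the leading run of set DD bits (assumed in bit 0 here). */
static int count_contiguous_dd(const int *s, int n)
{
	int nb_dd = 0;

	for (int j = 0; j < n; j++) {
		if (s[j] & 1)
			nb_dd++;
		else
			break;	/* later set bits may be stale, reordered reads */
	}
	return nb_dd;
}

int main(void)
{
	/* A view a reordered read could produce: descriptor 1 not yet done. */
	int s[LOOK_AHEAD] = {1, 0, 1, 1};

	/* Prints 1; a plain sum of the bits would wrongly report 3. */
	printf("nb_dd = %d\n", count_contiguous_dd(s, LOOK_AHEAD));
	return 0;
}
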
@@ -1868,7 +1887,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
                        iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
                        iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
                                &rxq->stats.ipsec_crypto);
-                       rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+                       rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
                        stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
                        pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
@@ -1898,7 +1917,7 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
        uint16_t pkt_len;
        uint64_t qword1;
        uint32_t rx_status;
-       int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+       int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
        int32_t i, j, nb_rx = 0;
        uint64_t pkt_flags;
        const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1929,9 +1948,27 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
 
                rte_smp_rmb();
 
-               /* Compute how many status bits were set */
-               for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
-                       nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+               /* Compute how many contiguous DD bits were set */
+               for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
+                       var = s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+#ifdef RTE_ARCH_ARM
+                       /* On Arm platforms, count only contiguous descriptors
+                        * whose DD bit is set to 1, because reads of the
+                        * descriptors can be reordered. Since the CPU may
+                        * be reading the descriptors as the NIC updates them
+                        * in memory, it is possible that the DD bit for a
+                        * descriptor earlier in the queue is read as not set
+                        * while the DD bit for a descriptor later in the
+                        * queue is read as set.
+                        */
+                       if (var)
+                               nb_dd += 1;
+                       else
+                               break;
+#else
+                       nb_dd += var;
+#endif
+               }
 
                nb_rx += nb_dd;
 
@@ -2439,6 +2476,14 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
                if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
                        hdrlen += ipseclen;
                bufsz = hdrlen + tlen;
+       } else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
+                       (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
+                                       RTE_MBUF_F_TX_UDP_SEG))) {
+               hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
+               if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
+                       hdrlen += m->l4_len;
+               bufsz = hdrlen + tlen;
+
        } else {
                bufsz = m->data_len;
        }
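
For a TSO or USO packet with inline IPsec offload, the new branch above programs the data descriptor's buffer size to cover only the L2/L3/L4 headers plus the crypto overhead rather than the whole data_len. A standalone sketch of that arithmetic with made-up lengths (the values are illustrative, not read from a real mbuf or security session):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical header lengths for an IPv4/TCP TSO packet. */
	uint32_t l2_len = 14;		/* Ethernet */
	uint32_t outer_l3_len = 0;	/* no tunnel in this example */
	uint32_t l3_len = 20;		/* IPv4 */
	uint32_t l4_len = 20;		/* TCP; added when L4 flags are set */
	uint32_t ipseclen = 24;		/* ESP header + IV; depends on the SA */
	uint32_t tlen = 12;		/* IPsec trailer from the session */

	/* Mirrors the diff: headers plus IPsec overhead, not data_len. */
	uint32_t hdrlen = l2_len + outer_l3_len + l3_len + ipseclen + l4_len;
	uint32_t bufsz = hdrlen + tlen;

	printf("bufsz = %u\n", bufsz);	/* 90 here, regardless of payload */
	return 0;
}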