mbuf: rename data address helpers to IOVA
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 3a7b68e..8b4f612 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -108,7 +108,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
 {
        if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
                (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
-               mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+               mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                mb->vlan_tci =
                        rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
                PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@@ -589,7 +589,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
                mb->nb_segs = 1;
                mb->port = rxq->port_id;
                dma_addr = rte_cpu_to_le_64(\
-                       rte_mbuf_data_dma_addr_default(mb));
+                       rte_mbuf_data_iova_default(mb));
                rxdp[i].read.hdr_addr = 0;
                rxdp[i].read.pkt_addr = dma_addr;
        }
@@ -752,7 +752,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
@@ -869,7 +869,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 
                /* Set data buffer address and data length of the mbuf */
                rxdp->read.hdr_addr = 0;
@@ -1202,7 +1202,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                        /* Setup TX Descriptor */
                        slen = m_seg->data_len;
-                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+                       buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
                        PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
                                "buf_dma_addr: %#"PRIx64";\n"
@@ -1301,7 +1301,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
        uint32_t i;
 
        for (i = 0; i < 4; i++, txdp++, pkts++) {
-               dma_addr = rte_mbuf_data_dma_addr(*pkts);
+               dma_addr = rte_mbuf_data_iova(*pkts);
                txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
                txdp->cmd_type_offset_bsz =
                        i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1315,7 +1315,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 {
        uint64_t dma_addr;
 
-       dma_addr = rte_mbuf_data_dma_addr(*pkts);
+       dma_addr = rte_mbuf_data_iova(*pkts);
        txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
        txdp->cmd_type_offset_bsz =
                i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1823,7 +1823,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);
 
-       rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+       rxq->rx_ring_phys_addr = rz->iova;
        rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
        len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
@@ -2161,7 +2161,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->vsi = vsi;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-       txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+       txq->tx_ring_phys_addr = tz->iova;
        txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
        /* Allocate software ring */
@@ -2223,12 +2223,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
        if (mz)
                return mz;
 
-       if (rte_xen_dom0_supported())
-               mz = rte_memzone_reserve_bounded(name, len,
-                               socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
-       else
-               mz = rte_memzone_reserve_aligned(name, len,
-                               socket_id, 0, I40E_RING_BASE_ALIGN);
+       mz = rte_memzone_reserve_aligned(name, len,
+                                        socket_id, 0, I40E_RING_BASE_ALIGN);
        return mz;
 }
 
@@ -2309,18 +2305,40 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 void
 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 {
+       struct rte_eth_dev *dev;
        uint16_t i;
 
        if (!txq || !txq->sw_ring) {
                PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
                return;
        }
 
+       dev = &rte_eth_devices[txq->port_id];
+
-       for (i = 0; i < txq->nb_tx_desc; i++) {
-               if (txq->sw_ring[i].mbuf) {
+       /**
+        *  vPMD Tx does not set sw_ring's mbuf to NULL after freeing it,
+        *  so the remaining mbufs need to be freed more carefully.
+        */
+       if (dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+               i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+               if (txq->tx_tail < i) {
+                       for (; i < txq->nb_tx_desc; i++) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+                       i = 0;
+               }
+               for (; i < txq->tx_tail; i++) {
                        rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                        txq->sw_ring[i].mbuf = NULL;
                }
+       } else {
+               for (i = 0; i < txq->nb_tx_desc; i++) {
+                       if (txq->sw_ring[i].mbuf) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+               }
        }
 }
 
@@ -2433,7 +2451,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
                mbuf->port = rxq->port_id;
 
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
                rxd = &rxq->rx_ring[i];
                rxd->read.pkt_addr = dma_addr;
@@ -2677,7 +2695,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        txq->vsi = pf->fdir.fdir_vsi;
 
-       txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+       txq->tx_ring_phys_addr = tz->iova;
        txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
        /*
         * don't need to allocate software ring and reset for the fdir
@@ -2733,7 +2751,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        rxq->vsi = pf->fdir.fdir_vsi;
 
-       rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+       rxq->rx_ring_phys_addr = rz->iova;
        rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
        /*
@@ -2943,6 +2961,64 @@ i40e_set_default_ptype_table(struct rte_eth_dev *dev)
                ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
 }
 
+void __attribute__((cold))
+i40e_set_default_pctype_table(struct rte_eth_dev *dev)
+{
+       struct i40e_adapter *ad =
+                       I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int i;
+
+       for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+               ad->pctypes_tbl[i] = 0ULL;
+       ad->flow_types_mask = 0ULL;
+       ad->pctypes_mask = 0ULL;
+
+       ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] =
+                               (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+       ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] =
+                               (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+       ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+                               (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+       ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
+                               (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
+       if (hw->mac.type == I40E_MAC_X722) {
+               ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+                       (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
+               ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+                       (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+               ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |=
+                       (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+               ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+                       (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP);
+               ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+                       (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+               ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |=
+                       (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+       }
+
+       for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+               if (ad->pctypes_tbl[i])
+                       ad->flow_types_mask |= (1ULL << i);
+               ad->pctypes_mask |= ad->pctypes_tbl[i];
+       }
+}
+
 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
 int __attribute__((weak))
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)