mbuf: add accessors for data room and private size
[dpdk.git] / lib / librte_pmd_i40e / i40e_rxtx.c
index c9f1026..493cfa3 100644 (file)
@@ -465,16 +465,13 @@ static inline void
 i40e_txd_enable_checksum(uint64_t ol_flags,
                        uint32_t *td_cmd,
                        uint32_t *td_offset,
-                       uint8_t l2_len,
-                       uint16_t l3_len,
-                       uint8_t outer_l2_len,
-                       uint16_t outer_l3_len,
+                       union i40e_tx_offload tx_offload,
                        uint32_t *cd_tunneling)
 {
        /* UDP tunneling packet TX checksum offload */
        if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
 
-               *td_offset |= (outer_l2_len >> 1)
+               *td_offset |= (tx_offload.outer_l2_len >> 1)
                                << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
                if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
@@ -485,25 +482,35 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
 
                /* Now set the ctx descriptor fields */
-               *cd_tunneling |= (outer_l3_len >> 2) <<
+               *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
                                I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-                               (l2_len >> 1) <<
+                               (tx_offload.l2_len >> 1) <<
                                I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
        } else
-               *td_offset |= (l2_len >> 1)
+               *td_offset |= (tx_offload.l2_len >> 1)
                        << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
        /* Enable L3 checksum offloads */
        if (ol_flags & PKT_TX_IP_CKSUM) {
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
-               *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               *td_offset |= (tx_offload.l3_len >> 2)
+                               << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
        } else if (ol_flags & PKT_TX_IPV4) {
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
-               *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               *td_offset |= (tx_offload.l3_len >> 2)
+                               << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
        } else if (ol_flags & PKT_TX_IPV6) {
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
-               *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               *td_offset |= (tx_offload.l3_len >> 2)
+                               << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+       }
+
+       if (ol_flags & PKT_TX_TCP_SEG) {
+               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+               *td_offset |= (tx_offload.l4_len >> 2)
+                       << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               return;
        }
 
        /* Enable L4 checksum offloads */
@@ -613,7 +620,7 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
                             "rxq->nb_rx_desc=%d",
                             rxq->rx_free_thresh, rxq->nb_rx_desc);
                ret = -EINVAL;
-       } else if (!(rxq->nb_rx_desc % rxq->rx_free_thresh) == 0) {
+       } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                             "rxq->nb_rx_desc=%d, "
                             "rxq->rx_free_thresh=%d",
@@ -1154,7 +1161,7 @@ i40e_calc_context_desc(uint64_t flags)
 {
        uint64_t mask = 0ULL;
 
-       mask |= PKT_TX_OUTER_IP_CKSUM;
+       mask |= (PKT_TX_OUTER_IP_CKSUM | PKT_TX_TCP_SEG);
 
 #ifdef RTE_LIBRTE_IEEE1588
        mask |= PKT_TX_IEEE1588_TMST;
@@ -1165,6 +1172,39 @@ i40e_calc_context_desc(uint64_t flags)
        return 0;
 }
 
+/* set i40e TSO context descriptor */
+static inline uint64_t
+i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+{
+       uint64_t ctx_desc = 0;
+       uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+       if (!tx_offload.l4_len) {
+               PMD_DRV_LOG(DEBUG, "L4 length set to 0");
+               return ctx_desc;
+       }
+
+       /**
+        * in case of non-tunneling packet, the outer_l2_len and
+        * outer_l3_len must be 0.
+        */
+       hdr_len = tx_offload.outer_l2_len +
+               tx_offload.outer_l3_len +
+               tx_offload.l2_len +
+               tx_offload.l3_len +
+               tx_offload.l4_len;
+
+       cd_cmd = I40E_TX_CTX_DESC_TSO;
+       cd_tso_len = mbuf->pkt_len - hdr_len;
+       ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+               ((uint64_t)cd_tso_len <<
+                I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+               ((uint64_t)mbuf->tso_segsz <<
+                I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+       return ctx_desc;
+}
+
 uint16_t
 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -1183,15 +1223,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint32_t tx_flags;
        uint32_t td_tag;
        uint64_t ol_flags;
-       uint8_t l2_len;
-       uint16_t l3_len;
-       uint8_t outer_l2_len;
-       uint16_t outer_l3_len;
        uint16_t nb_used;
        uint16_t nb_ctx;
        uint16_t tx_last;
        uint16_t slen;
        uint64_t buf_dma_addr;
+       union i40e_tx_offload tx_offload = {0};
 
        txq = tx_queue;
        sw_ring = txq->sw_ring;
@@ -1213,10 +1250,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
                ol_flags = tx_pkt->ol_flags;
-               l2_len = tx_pkt->l2_len;
-               l3_len = tx_pkt->l3_len;
-               outer_l2_len = tx_pkt->outer_l2_len;
-               outer_l3_len = tx_pkt->outer_l3_len;
+               tx_offload.l2_len = tx_pkt->l2_len;
+               tx_offload.l3_len = tx_pkt->l3_len;
+               tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+               tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+               tx_offload.l4_len = tx_pkt->l4_len;
+               tx_offload.tso_segsz = tx_pkt->tso_segsz;
 
                /* Calculate the number of context descriptors needed. */
                nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1267,9 +1306,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                cd_tunneling_params = 0;
                if (unlikely(ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)) {
                        i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
-                               l2_len, l3_len, outer_l2_len,
-                               outer_l3_len,
-                               &cd_tunneling_params);
+                               tx_offload, &cd_tunneling_params);
                }
 
                if (unlikely(nb_ctx)) {
@@ -1287,12 +1324,20 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                rte_pktmbuf_free_seg(txe->mbuf);
                                txe->mbuf = NULL;
                        }
-#ifdef RTE_LIBRTE_IEEE1588
-                       if (ol_flags & PKT_TX_IEEE1588_TMST)
+
+                       /* TSO enabled means no timestamp */
+                       if (ol_flags & PKT_TX_TCP_SEG)
                                cd_type_cmd_tso_mss |=
-                                       ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
-                                               I40E_TXD_CTX_QW1_CMD_SHIFT);
+                                       i40e_set_tso_ctx(tx_pkt, tx_offload);
+                       else {
+#ifdef RTE_LIBRTE_IEEE1588
+                               if (ol_flags & PKT_TX_IEEE1588_TMST)
+                                       cd_type_cmd_tso_mss |=
+                                               ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
+                                                I40E_TXD_CTX_QW1_CMD_SHIFT);
 #endif
+                       }
+
                        ctx_txd->tunneling_params =
                                rte_cpu_to_le_32(cd_tunneling_params);
                        ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
@@ -2399,11 +2444,10 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
        struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
        struct rte_eth_dev_data *data = pf->dev_data;
-       struct rte_pktmbuf_pool_private *mbp_priv =
-                       rte_mempool_get_priv(rxq->mp);
-       uint16_t buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
-                                               RTE_PKTMBUF_HEADROOM);
-       uint16_t len;
+       uint16_t buf_size, len;
+
+       buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+               RTE_PKTMBUF_HEADROOM);
 
        switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
                        I40E_FLAG_HEADER_SPLIT_ENABLED)) {
@@ -2461,7 +2505,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
        uint16_t pf_q = rxq->reg_idx;
        uint16_t buf_size;
        struct i40e_hmc_obj_rxq rx_ctx;
-       struct rte_pktmbuf_pool_private *mbp_priv;
 
        err = i40e_rx_queue_config(rxq);
        if (err < 0) {
@@ -2508,9 +2551,8 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 
        rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 
-       mbp_priv = rte_mempool_get_priv(rxq->mp);
-       buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
-                                       RTE_PKTMBUF_HEADROOM);
+       buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+               RTE_PKTMBUF_HEADROOM);
 
        /* Check if scattered RX needs to be used. */
        if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {