net/i40e: convert to new Tx offloads API
[dpdk.git] / drivers / net / qede / qede_rxtx.c
index df248cf..2b9db32 100644
@@ -84,11 +84,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        rxq->port_id = dev->data->port_id;
 
        max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
-       qdev->mtu = max_rx_pkt_len;
 
        /* Fix up RX buffer size */
        bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
-       if ((rxmode->enable_scatter)                    ||
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
                if (!dev->data->scattered_rx) {
                        DP_INFO(edev, "Forcing scatter-gather mode\n");
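
A minimal application-side sketch (not part of the patch) of how scattered Rx is requested under the new offloads scheme this hunk now checks; it assumes an ethdev version where rxmode.offloads is honored directly (the transitional ignore_offload_bitfield handling is omitted):

#include <string.h>
#include <rte_ethdev.h>

/* Request per-port scattered Rx via rxmode.offloads, replacing the
 * old rxmode.enable_scatter bit the driver used to test.
 */
static int
configure_scattered_rx(uint16_t port_id, uint32_t max_rx_pkt_len)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = DEV_RX_OFFLOAD_SCATTER;
	conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

	/* one Rx and one Tx queue, for illustration only */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
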
@@ -97,9 +96,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        }
 
        if (dev->data->scattered_rx)
-               rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+               rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+                                  ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
        else
-               rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+               rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
        /* Align to cache-line size if needed */
        rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
 
@@ -158,7 +158,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        qdev->fp_array[queue_idx].rxq = rxq;
 
        DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
-                 queue_idx, nb_desc, qdev->mtu, socket_id);
+                 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
 
        return 0;
 }
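
For reference, a sketch (not part of the patch; it borrows the driver's own QEDE_ETH_OVERHEAD and QEDE_CEIL_TO_CACHE_LINE_SIZE macros from this file) of the buffer-size arithmetic the hunks above now compute and log:

#include <stdbool.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Scattered Rx sizes each buffer from the mbuf data room; otherwise a
 * single buffer must hold max_rx_pkt_len plus the driver overhead.
 * Both results are rounded up to a cache line.
 */
static uint16_t
sketch_rx_buf_size(struct rte_mempool *mp, uint16_t max_rx_pkt_len,
		   bool scattered_rx)
{
	uint16_t bufsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;

	if (scattered_rx)
		return QEDE_CEIL_TO_CACHE_LINE_SIZE(bufsz + ETHER_HDR_LEN +
						    ETHER_CRC_LEN +
						    QEDE_ETH_OVERHEAD);

	return QEDE_CEIL_TO_CACHE_LINE_SIZE(max_rx_pkt_len + QEDE_ETH_OVERHEAD);
}
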
@@ -417,6 +417,8 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 
        for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
                fp = &qdev->fp_array[sb_idx];
+               if (!fp)
+                       continue;
                fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
                                RTE_CACHE_LINE_SIZE);
                if (!fp->sb_info) {
@@ -448,6 +450,8 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 
        for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
                fp = &qdev->fp_array[sb_idx];
+               if (!fp)
+                       continue;
                DP_INFO(edev, "Free sb_info index 0x%x\n",
                                fp->sb_info->igu_sb_id);
                if (fp->sb_info) {
@@ -1462,6 +1466,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         */
                        rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
                        packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+               } else {
+                       packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
                }
 
                /* Common handling for non-tunnel packets and for inner
@@ -1483,7 +1489,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        ol_flags |= PKT_RX_IP_CKSUM_BAD;
                } else {
                        ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-                       packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
                }
 
                if (CQE_HAS_VLAN(parse_flag) ||
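
A small consumer-side sketch (not part of the patch) of why moving the packet-type assignment matters: after these two hunks, m->packet_type is populated for non-tunnel packets even when the IP checksum is bad, so an application can still classify the frame:

#include <stdio.h>
#include <rte_mbuf.h>

/* Classify a received mbuf; the L3 type is valid regardless of the
 * IP checksum verdict carried in ol_flags.
 */
static void
sketch_classify_rx(const struct rte_mbuf *m)
{
	uint32_t l3 = m->packet_type & RTE_PTYPE_L3_MASK;

	if (m->ol_flags & PKT_RX_IP_CKSUM_BAD)
		printf("bad IP csum, L3 ptype %#x\n", (unsigned int)l3);
	else if (l3 == RTE_PTYPE_L3_IPV4)
		printf("IPv4\n");
	else if (l3 == RTE_PTYPE_L3_IPV6)
		printf("IPv6\n");
}
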
@@ -1833,17 +1838,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 * offloads. Don't rely on pkt_type marked by Rx, instead use
                 * tx_ol_flags to decide.
                 */
-               if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_VXLAN) ||
-                   ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_MPLSINUDP) ||
-                   ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_GENEVE)) {
+               tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+               if (tunn_flg) {
                        /* Check against max which is Tunnel IPv6 + ext */
                        if (unlikely(txq->nb_tx_avail <
                                ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
                                        break;
-                       tunn_flg = true;
+
                        /* First indicate its a tunnel pkt */
                        bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
                                  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
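
A transmit-side sketch (not part of the patch; header struct names per pre-19.08 DPDK) of how a packet might be marked so that the PKT_TX_TUNNEL_MASK test above sets tunn_flg, here for VXLAN with outer and inner checksum offload:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Any PKT_TX_TUNNEL_* type satisfies the PKT_TX_TUNNEL_MASK test in
 * the transmit routine above.
 */
static void
sketch_mark_vxlan_tx(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_IP_CKSUM |
		       PKT_TX_UDP_CKSUM;

	m->outer_l2_len = sizeof(struct ether_hdr);
	m->outer_l3_len = sizeof(struct ipv4_hdr);
	/* l2_len/l3_len/l4_len must also be filled to describe the inner
	 * headers as the mbuf Tx offload API requires (omitted here).
	 */
}
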
@@ -1982,7 +1984,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         * csum offload is requested then we need to force
                         * recalculation of L4 tunnel header csum also.
                         */
-                       if (tunn_flg) {
+                       if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+                                                       PKT_TX_TUNNEL_GRE)) {
                                bd1_bd_flags_bf |=
                                        ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
                                        ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
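
For clarity (sketch, not part of the patch): tunnel types are values encoded within PKT_TX_TUNNEL_MASK rather than independent bits, which is why the GRE special case above compares the masked field instead of testing a single flag:

#include <stdbool.h>
#include <rte_mbuf.h>

/* The tunnel type occupies a small field inside ol_flags; compare the
 * masked value to match one specific type such as GRE.
 */
static inline bool
sketch_is_gre_tunnel(uint64_t tx_ol_flags)
{
	return (tx_ol_flags & PKT_TX_TUNNEL_MASK) == PKT_TX_TUNNEL_GRE;
}
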