net/qede: fix compilation with -Og
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 2b207c3..45b4aeb 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -111,9 +111,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
                                             RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_rx_ring) {
-               DP_NOTICE(edev, false,
-                         "Unable to alloc memory for sw_rx_ring on socket %u\n",
-                         socket_id);
+               DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
+                      " socket %u\n", socket_id);
                rte_free(rxq);
                return -ENOMEM;
        }
@@ -129,9 +128,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                            NULL);
 
        if (rc != ECORE_SUCCESS) {
-               DP_NOTICE(edev, false,
-                         "Unable to alloc memory for rxbd ring on socket %u\n",
-                         socket_id);
+               DP_ERR(edev, "Memory allocation fails for RX BD ring"
+                      " on socket %u\n", socket_id);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
                return -ENOMEM;
@@ -148,9 +146,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                            NULL);
 
        if (rc != ECORE_SUCCESS) {
-               DP_NOTICE(edev, false,
-                         "Unable to alloc memory for cqe ring on socket %u\n",
-                         socket_id);
+               DP_ERR(edev, "Memory allocation fails for RX CQE ring"
+                      " on socket %u\n", socket_id);
                qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
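
Note: all three error paths above switch from DP_NOTICE to DP_ERR and keep the existing unwind order: free whatever was allocated before the failing step, then the queue structure itself. A minimal standalone sketch of that unwind pattern, using plain calloc()/free() and fprintf() in place of the rte_/ecore_ allocators and DP_ERR (everything below is illustrative, not driver code):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rxq {
            void *sw_ring;
            void *bd_ring;
            void *cqe_ring;
    };

    /* Allocate three resources in order; on failure, release what was
     * already allocated in reverse order, mirroring the error paths of
     * qede_rx_queue_setup().
     */
    static int rxq_setup(struct rxq **out, size_t nb_desc)
    {
            struct rxq *q = calloc(1, sizeof(*q));

            if (q == NULL)
                    return -ENOMEM;

            q->sw_ring = calloc(nb_desc, sizeof(void *));
            if (q->sw_ring == NULL) {
                    fprintf(stderr, "sw ring allocation failed\n");
                    free(q);
                    return -ENOMEM;
            }

            q->bd_ring = calloc(nb_desc, 16);
            if (q->bd_ring == NULL) {
                    fprintf(stderr, "BD ring allocation failed\n");
                    free(q->sw_ring);
                    free(q);
                    return -ENOMEM;
            }

            q->cqe_ring = calloc(nb_desc, 32);
            if (q->cqe_ring == NULL) {
                    fprintf(stderr, "CQE ring allocation failed\n");
                    free(q->bd_ring);
                    free(q->sw_ring);
                    free(q);
                    return -ENOMEM;
            }

            *out = q;
            return 0;
    }
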
@@ -367,12 +364,12 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
                  uint16_t sb_id)
 {
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct status_block *sb_virt;
+       struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        int rc;
 
        sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
-                                         sizeof(struct status_block));
+                                         sizeof(struct status_block_e4));
        if (!sb_virt) {
                DP_ERR(edev, "Status block allocation failed\n");
                return -ENOMEM;
@@ -382,7 +379,7 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
        if (rc) {
                DP_ERR(edev, "Status block initialization failed\n");
                OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
-                                      sizeof(struct status_block));
+                                      sizeof(struct status_block_e4));
                return rc;
        }
 
@@ -456,7 +453,7 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
                if (fp->sb_info) {
                        OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
                                fp->sb_info->sb_phys,
-                               sizeof(struct status_block));
+                               sizeof(struct status_block_e4));
                        rte_free(fp->sb_info);
                        fp->sb_info = NULL;
                }
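
Note: the three status_block hunks only resize the DMA-coherent allocation and the two matching frees to struct status_block_e4; with coherent DMA memory the size passed to the free must equal the size passed to the allocation, so all three sites have to agree. A small sketch of keeping them in lockstep behind one pair of helpers (the helpers are hypothetical; only the OSAL_* names mentioned in the comments are the driver's real macros):

    #include <stdlib.h>

    /* Stand-in for the hardware status block; the real layout is
     * struct status_block_e4 in the ecore headers.
     */
    struct status_block_e4 {
            unsigned short pi_array[12];
            unsigned int sb_num;
    };

    /* Hypothetical wrappers: funnelling the size through one place keeps
     * the allocation in qede_alloc_mem_sb() and the frees in its error
     * path and in qede_dealloc_fp_resc() from ever disagreeing.
     */
    static void *sb_dma_alloc(void)
    {
            /* OSAL_DMA_ALLOC_COHERENT(edev, &phys, size) in the driver. */
            return calloc(1, sizeof(struct status_block_e4));
    }

    static void sb_dma_free(void *sb_virt)
    {
            /* OSAL_DMA_FREE_COHERENT() takes the same size in the driver. */
            free(sb_virt);
    }
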
@@ -555,9 +552,10 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
                /* Prepare ramrod */
                memset(&params, 0, sizeof(params));
-               params.queue_id = rx_queue_id;
+               params.queue_id = rx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
-               params.sb = fp->sb_info->igu_sb_id;
+               params.stats_id = params.vport_id;
+               params.p_sb = fp->sb_info;
                DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
                                fp->rxq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = RX_PI;
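
Note: dividing by edev->num_hwfns matters on CMT (100G, two hardware functions) adapters, where host queues are spread round-robin across the hardware functions, so the ramrod needs the queue index relative to its own hwfn rather than the global one. A small, self-contained illustration of that mapping (the round-robin layout is an assumption stated here; the division itself comes from the hunk above):

    #include <stdint.h>
    #include <stdio.h>

    /* Round-robin spreading: global queue i lives on hwfn (i % n) and is
     * that hwfn's queue (i / n).  With n == 1 the mapping is the
     * identity, so the division is harmless on single-hwfn adapters.
     */
    static void map_queue(uint16_t global_id, uint8_t num_hwfns,
                          uint8_t *hwfn, uint16_t *rel_id)
    {
            *hwfn = global_id % num_hwfns;
            *rel_id = global_id / num_hwfns;
    }

    int main(void)
    {
            uint16_t q;

            for (q = 0; q < 4; q++) {
                    uint8_t hwfn;
                    uint16_t rel;

                    map_queue(q, 2, &hwfn, &rel);
                    printf("global rxq %u -> hwfn %u, queue %u\n",
                           q, hwfn, rel);
            }
            return 0;
    }
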
@@ -613,9 +611,10 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
                txq = eth_dev->data->tx_queues[tx_queue_id];
                fp = &qdev->fp_array[tx_queue_id];
                memset(&params, 0, sizeof(params));
-               params.queue_id = tx_queue_id;
+               params.queue_id = tx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
-               params.sb = fp->sb_info->igu_sb_id;
+               params.stats_id = params.vport_id;
+               params.p_sb = fp->sb_info;
                DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
                                fp->txq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = TX_PI(0); /* tc = 0 */
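
Note: both queue-start hunks also replace the raw IGU index (params.sb) with a pointer to the whole status-block descriptor (params.p_sb = fp->sb_info), letting ECORE pull the IGU index, DMA address and producer array out of one structure instead of receiving a single field. A rough sketch of that interface shape (struct and field names below are simplified stand-ins for struct ecore_sb_info and the queue-start parameters, not the real definitions):

    #include <stdint.h>

    /* Simplified stand-in for struct ecore_sb_info. */
    struct sb_info {
            uint16_t igu_sb_id;     /* interrupt status block index */
            uint64_t sb_phys;       /* DMA address of the block     */
            void *sb_virt;          /* CPU mapping of the block     */
    };

    /* Simplified queue-start parameters: the consumer gets the whole
     * descriptor, so later status-block attributes can be used without
     * touching this interface.
     */
    struct q_start_params {
            uint16_t queue_id;      /* per-hwfn relative index      */
            uint8_t vport_id;
            uint8_t stats_id;
            uint8_t sb_idx;
            struct sb_info *p_sb;   /* whole descriptor, not one field */
    };

    static uint16_t ramrod_igu_index(const struct q_start_params *p)
    {
            return p->p_sb->igu_sb_id;
    }
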
@@ -781,7 +780,7 @@ int qede_start_queues(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        uint8_t id;
-       int rc;
+       int rc = -1;
 
        for_each_rss(id) {
                rc = qede_rx_queue_start(eth_dev, id);
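
Note: the rc = -1 initializer is the fix named in the subject line: under -Og, GCC cannot prove that for_each_rss() runs at least once, so the function's final "return rc;" triggers -Wmaybe-uninitialized. A minimal reproduction of the warning pattern (names are illustrative; compile with gcc -Og -Wall -c):

    extern int start_queue(unsigned int id);
    extern unsigned int num_rss;

    int start_all(void)
    {
            int rc = -1;    /* without "= -1", -Og warns that rc may be
                             * used uninitialized when num_rss is zero
                             */
            unsigned int id;

            for (id = 0; id < num_rss; id++) {
                    rc = start_queue(id);
                    if (rc != 0)
                            break;
            }

            return rc;
    }
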
@@ -1682,12 +1681,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                inner_l4_hdr_offset = (mbuf->l2_len -
                                        MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
 
-                               /* TODO: There's no DPDK flag to request outer
-                                * L4 checksum offload, so we don't do it.
-                                * bd1_bd_flags_bf |=
-                                *      ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
-                                *      ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
-                                */
                                /* Inner L2 size and address type */
                                bd2_bf1 |= (inner_l2_hdr_size &
                                        ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
@@ -1766,15 +1759,36 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                }
 
                /* Offload the IP checksum in the hardware */
-               if (tx_ol_flags & PKT_TX_IP_CKSUM)
+               if (tx_ol_flags & PKT_TX_IP_CKSUM) {
                        bd1_bd_flags_bf |=
                                1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       /* There's no DPDK flag to request outer-L4 csum
+                        * offload. But in the case of tunnel if inner L3 or L4
+                        * csum offload is requested then we need to force
+                        * recalculation of L4 tunnel header csum also.
+                        */
+                       if (tunn_flg) {
+                               bd1_bd_flags_bf |=
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+                       }
+               }
 
                /* L4 checksum offload (tcp or udp) */
                if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
                    (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
                        bd1_bd_flags_bf |=
                                1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+                       /* There's no DPDK flag to request outer-L4 csum
+                        * offload. But in the case of tunnel if inner L3 or L4
+                        * csum offload is requested then we need to force
+                        * recalculation of L4 tunnel header csum also.
+                        */
+                       if (tunn_flg) {
+                               bd1_bd_flags_bf |=
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+                       }
                }
 
                /* Fill the entry in the SW ring and the BDs in the FW ring */
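
Note: the last two hunks turn the previously commented-out idea into code: DPDK has no separate outer-L4 checksum offload flag, so when a packet is tunnelled and the application requests inner IP or inner L4 checksum offload, the tunnel (outer) L4 checksum is forced to be recomputed as well via ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM. An illustrative transmit-side mbuf setup that would take this path, using VXLAN as the example tunnel type (treat it as a sketch; the l2/l3/outer_* length fields must also be filled per the rte_mbuf tunnel conventions and are omitted here):

    #include <rte_mbuf.h>

    /* Requesting inner IPv4 + TCP checksum offload on a VXLAN-tunnelled
     * mbuf: tunn_flg is derived from the tunnel flag, and the IP/L4 csum
     * requests then also raise the TUNN_L4_CSUM BD flag added above.
     */
    static void request_inner_csum(struct rte_mbuf *m)
    {
            m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
                           PKT_TX_IPV4 |
                           PKT_TX_IP_CKSUM |
                           PKT_TX_TCP_CKSUM;
    }
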