mbuf: replace data pointer by an offset
[dpdk.git] / lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
index 08d7330..263f9ce 100644
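
This commit drops the mbuf's pkt.data pointer and stores a data_off offset (relative to buf_addr) instead, so the vmxnet3 PMD derives both the virtual and the DMA address of the payload from the buffer base plus that offset. A minimal sketch of the equivalence, assuming the post-patch mbuf fields visible in the diff (buf_addr, buf_physaddr, data_off); the two helpers below are hypothetical and only illustrate what the driver code computes:

#include <stdint.h>
#include <rte_mbuf.h>

/* Before the patch, the payload virtual address was stored directly in
 * mb->pkt.data and the DMA address was computed as
 * buf_physaddr + (pkt.data - buf_addr).  After the patch both addresses
 * come from the offset alone. */
static inline void *
mbuf_data_va(const struct rte_mbuf *mb)
{
	/* equivalent to what rte_pktmbuf_mtod() expands to after this change */
	return (char *)mb->buf_addr + mb->data_off;
}

static inline uint64_t
mbuf_data_pa(const struct rte_mbuf *mb)
{
	/* matches the reworked RTE_MBUF_DATA_DMA_ADDR() macro in the first hunk */
	return (uint64_t)mb->buf_physaddr + mb->data_off;
}

On the receive path the driver accordingly resets rxm->data_off to RTE_PKTMBUF_HEADROOM instead of recomputing a pointer from buf_addr, as seen in the vmxnet3_recv_pkts() hunk below.
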
@@ -79,8 +79,7 @@
 
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
-       (char *)(mb)->buf_addr))
+       (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
        (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
@@ -100,7 +99,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
        struct rte_mbuf *m;
 
        m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+       __rte_mbuf_sanity_check_raw(m, 0);
        return m;
 }
 
@@ -114,23 +113,23 @@ vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
                return;
 
        PMD_RX_LOG(DEBUG,
-                  "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.\n",
+                  "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.",
                   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
        PMD_RX_LOG(DEBUG,
-                  "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.\n",
+                  "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
                   (unsigned long)rxq->cmd_ring[0].basePA,
                   (unsigned long)rxq->cmd_ring[1].basePA,
                   (unsigned long)rxq->comp_ring.basePA);
 
        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
        PMD_RX_LOG(DEBUG,
-                  "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u\n",
+                  "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)rxq->cmd_ring[0].size, avail,
                   rxq->comp_ring.next2proc,
                   rxq->cmd_ring[0].size - avail);
 
        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
-       PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u\n",
+       PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
                   rxq->cmd_ring[1].size - avail);
 
@@ -144,14 +143,14 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
        if (txq == NULL)
                return;
 
-       PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.\n",
+       PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.",
                   txq->cmd_ring.base, txq->comp_ring.base);
-       PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.\n",
+       PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.",
                   (unsigned long)txq->cmd_ring.basePA,
                   (unsigned long)txq->comp_ring.basePA);
 
        avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
-       PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u\n",
+       PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)txq->cmd_ring.size, avail,
                   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
 }
@@ -173,6 +172,7 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
                vmxnet3_cmd_ring_adv_next2comp(ring);
        }
        rte_free(ring->buf_info);
+       ring->buf_info = NULL;
 }
 
 void
@@ -257,7 +257,7 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
                completed++;
        }
 
-       PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.\n", completed);
+       PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
 }
 
 uint16_t
@@ -274,7 +274,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        hw = txq->hw;
 
        if (txq->stopped) {
-               PMD_TX_LOG(DEBUG, "Tx queue is stopped.\n");
+               PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
                return 0;
        }
 
@@ -288,8 +288,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                        txm = tx_pkts[nb_tx];
                        /* Don't support scatter packets yet, free them if met */
-                       if (txm->pkt.nb_segs != 1) {
-                               PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!\n");
+                       if (txm->nb_segs != 1) {
+                               PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
                                rte_pktmbuf_free(tx_pkts[nb_tx]);
                                txq->stats.drop_total++;
 
@@ -298,8 +298,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        }
 
                        /* Needs to minus ether header len */
-                       if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
-                               PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU\n");
+                       if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+                               PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
                                rte_pktmbuf_free(tx_pkts[nb_tx]);
                                txq->stats.drop_total++;
 
@@ -313,7 +313,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
                        tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
                        txd->addr = tbi->bufPA;
-                       txd->len = txm->pkt.data_len;
+                       txd->len = txm->data_len;
 
                        /* Mark the last descriptor as End of Packet. */
                        txd->cq = 1;
@@ -339,7 +339,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        nb_tx++;
 
                } else {
-                       PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)\n");
+                       PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
                        txq->stats.drop_total += (nb_pkts - nb_tx);
                        break;
                }
@@ -399,7 +399,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
                /* Allocate blank mbuf for the current Rx Descriptor */
                mbuf = rte_rxmbuf_alloc(rxq->mp);
                if (mbuf == NULL) {
-                       PMD_RX_LOG(ERR, "Error allocating mbuf in %s\n", __func__);
+                       PMD_RX_LOG(ERR, "Error allocating mbuf in %s", __func__);
                        rxq->stats.rx_buf_alloc_failure++;
                        err = ENOMEM;
                        break;
@@ -462,7 +462,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
 
        if (rxq->stopped) {
-               PMD_RX_LOG(DEBUG, "Rx queue is stopped.\n");
+               PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
                return 0;
        }
 
@@ -483,7 +483,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                } else {
 
-                       PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.\n", idx, ring_idx);
+                       PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
 
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
                        VMXNET3_ASSERT(rcd->len <= rxd->len);
@@ -504,7 +504,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        if (rxd->btype != VMXNET3_RXD_BTYPE_HEAD) {
                                PMD_RX_LOG(DEBUG,
                                           "Alert : Misbehaving device, incorrect "
-                                          " buffer type used. iPacket dropped.\n");
+                                          " buffer type used. iPacket dropped.");
                                rte_pktmbuf_free_seg(rbi->m);
                                goto rcd_done;
                        }
@@ -528,9 +528,9 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                                if (!rcd->fcs) {
                                        rxq->stats.drop_fcs++;
-                                       PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.\n");
+                                       PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
                                }
-                               PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d\n",
+                               PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
                                           (int)(rcd - (struct Vmxnet3_RxCompDesc *)
                                                 rxq->comp_ring.base), rcd->rxdIdx);
                                rte_pktmbuf_free_seg(rxm);
@@ -541,7 +541,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        /* Check for hardware stripped VLAN tag */
                        if (rcd->ts) {
 
-                               PMD_RX_LOG(ERR, "Received packet with vlan ID: %d.\n",
+                               PMD_RX_LOG(ERR, "Received packet with vlan ID: %d.",
                                           rcd->tci);
                                rxm->ol_flags = PKT_RX_VLAN_PKT;
 
@@ -550,21 +550,21 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                               rte_pktmbuf_mtod(rxm, void *));
 #endif
                                /* Copy vlan tag in packet buffer */
-                               rxm->pkt.vlan_macip.f.vlan_tci =
-                                       rte_le_to_cpu_16((uint16_t)rcd->tci);
+                               rxm->vlan_tci = rte_le_to_cpu_16(
+                                               (uint16_t)rcd->tci);
 
                        } else
                                rxm->ol_flags = 0;
 
                        /* Initialize newly received packet buffer */
-                       rxm->pkt.in_port = rxq->port_id;
-                       rxm->pkt.nb_segs = 1;
-                       rxm->pkt.next = NULL;
-                       rxm->pkt.pkt_len = (uint16_t)rcd->len;
-                       rxm->pkt.data_len = (uint16_t)rcd->len;
-                       rxm->pkt.in_port = rxq->port_id;
-                       rxm->pkt.vlan_macip.f.vlan_tci = 0;
-                       rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+                       rxm->port = rxq->port_id;
+                       rxm->nb_segs = 1;
+                       rxm->next = NULL;
+                       rxm->pkt_len = (uint16_t)rcd->len;
+                       rxm->data_len = (uint16_t)rcd->len;
+                       rxm->port = rxq->port_id;
+                       rxm->vlan_tci = 0;
+                       rxm->data_off = RTE_PKTMBUF_HEADROOM;
 
                        rx_pkts[nb_rx++] = rxm;
 
@@ -587,7 +587,7 @@ rcd_done:
                        if (nb_rxd > rxq->cmd_ring[0].size) {
                                PMD_RX_LOG(ERR,
                                           "Used up quota of receiving packets,"
-                                          " relinquish control.\n");
+                                          " relinquish control.");
                                break;
                        }
                }
@@ -627,31 +627,30 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           unsigned int socket_id,
                           __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
 {
+       struct vmxnet3_hw     *hw = dev->data->dev_private;
        const struct rte_memzone *mz;
        struct vmxnet3_tx_queue *txq;
-       struct vmxnet3_hw     *hw;
        struct vmxnet3_cmd_ring *ring;
        struct vmxnet3_comp_ring *comp_ring;
        int size;
 
        PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
            ETH_TXQ_FLAGS_NOMULTSEGS) {
-               PMD_INIT_LOG(ERR, "TX Multi segment not support yet\n");
+               PMD_INIT_LOG(ERR, "TX Multi segment not support yet");
                return -EINVAL;
        }
 
        if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS) !=
            ETH_TXQ_FLAGS_NOOFFLOADS) {
-               PMD_INIT_LOG(ERR, "TX not support offload function yet\n");
+               PMD_INIT_LOG(ERR, "TX not support offload function yet");
                return -EINVAL;
        }
 
        txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), CACHE_LINE_SIZE);
        if (txq == NULL) {
-               PMD_INIT_LOG(ERR, "Can not allocate tx queue structure\n");
+               PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
                return -ENOMEM;
        }
 
@@ -667,11 +666,11 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Tx vmxnet ring length should be between 512-4096 */
        if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
-               PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u\n",
+               PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
                             VMXNET3_DEF_TX_RING_SIZE);
                return -EINVAL;
        } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
-               PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u\n",
+               PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
                             VMXNET3_TX_RING_MAX_SIZE);
                return -EINVAL;
        } else {
@@ -692,7 +691,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
        if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
+               PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
@@ -710,7 +709,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
        ring->buf_info = rte_zmalloc("tx_ring_buf_info",
                                     ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
        if (ring->buf_info == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure\n");
+               PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
                return -ENOMEM;
        }
 
@@ -730,7 +729,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 {
        const struct rte_memzone *mz;
        struct vmxnet3_rx_queue *rxq;
-       struct vmxnet3_hw     *hw;
+       struct vmxnet3_hw     *hw = dev->data->dev_private;
        struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
        struct vmxnet3_comp_ring *comp_ring;
        int size;
@@ -740,7 +739,6 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
        struct rte_pktmbuf_pool_private *mbp_priv;
 
        PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        mbp_priv = (struct rte_pktmbuf_pool_private *)
                rte_mempool_get_priv(mp);
@@ -749,14 +747,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
                PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
-                            "VMXNET3 don't support scatter packets yet\n",
+                            "VMXNET3 don't support scatter packets yet",
                             buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
                return -EINVAL;
        }
 
        rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), CACHE_LINE_SIZE);
        if (rxq == NULL) {
-               PMD_INIT_LOG(ERR, "Can not allocate rx queue structure\n");
+               PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
                return -ENOMEM;
        }
 
@@ -775,10 +773,10 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        /* Rx vmxnet rings length should be between 256-4096 */
        if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
-               PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256\n");
+               PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256");
                return -EINVAL;
        } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
-               PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096\n");
+               PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096");
                return -EINVAL;
        } else {
                ring0->size = nb_desc;
@@ -803,7 +801,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
        if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
+               PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
@@ -830,7 +828,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
                ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
                if (ring->buf_info == NULL) {
-                       PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure\n");
+                       PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
                        return -ENOMEM;
                }
        }
@@ -848,12 +846,12 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 int
 vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw;
+       struct vmxnet3_hw *hw = dev->data->dev_private;
+
        int i, ret;
        uint8_t j;
 
        PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        for (i = 0; i < hw->num_rx_queues; i++) {
                vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
@@ -862,7 +860,7 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
                        /* Passing 0 as alloc_num will allocate full ring */
                        ret = vmxnet3_post_rx_bufs(rxq, j);
                        if (ret <= 0) {
-                               PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d\n", i, j);
+                               PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
                                return -ret;
                        }
                        /* Updating device with the index:next2fill to fill the mbufs for coming packets */
@@ -903,14 +901,14 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
                ETH_RSS_IPV6 | \
                ETH_RSS_IPV6_TCP)
 
-       struct vmxnet3_hw *hw;
+       struct vmxnet3_hw *hw = dev->data->dev_private;
        struct VMXNET3_RSSConf *dev_rss_conf;
        struct rte_eth_rss_conf *port_rss_conf;
        uint64_t rss_hf;
        uint8_t i, j;
 
        PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
        dev_rss_conf = hw->rss_conf;
        port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
 
@@ -958,7 +956,7 @@ int
 vmxnet3_vlan_configure(struct rte_eth_dev *dev)
 {
        uint8_t i;
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct vmxnet3_hw *hw = dev->data->dev_private;
        uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
 
        PMD_INIT_FUNC_TRACE();
@@ -969,7 +967,7 @@ vmxnet3_vlan_configure(struct rte_eth_dev *dev)
                vf_table[i] = 0;
                /* To-Do: Provide another routine in dev_ops for user config */
 
-               PMD_INIT_LOG(DEBUG, "Registering VLAN portid: %"PRIu8" tag %u\n",
+               PMD_INIT_LOG(DEBUG, "Registering VLAN portid: %"PRIu8" tag %u",
                                        dev->data->port_id, vf_table[i]);
        }