mbuf: mark old VLAN offload flags as deprecated
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 65f1aba..33ec698 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -40,9 +40,6 @@
        (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
 
 #define HN_TXD_CACHE_SIZE      32 /* per cpu tx_descriptor pool cache */
-#define HN_TXCOPY_THRESHOLD    512
-
-#define HN_RXCOPY_THRESHOLD    256
 #define HN_RXQ_EVENT_DEFAULT   2048
 
 struct hn_rxinfo {
@@ -252,16 +249,6 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
-                                socket_id);
-       if (!txq)
-               return -ENOMEM;
-
-       txq->hv = hv;
-       txq->chan = hv->channels[queue_idx];
-       txq->port_id = dev->data->port_id;
-       txq->queue_id = queue_idx;
-
        tx_free_thresh = tx_conf->tx_free_thresh;
        if (tx_free_thresh == 0)
                tx_free_thresh = RTE_MIN(nb_desc / 4,
@@ -276,6 +263,15 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!txq)
+               return -ENOMEM;
+
+       txq->hv = hv;
+       txq->chan = hv->channels[queue_idx];
+       txq->port_id = dev->data->port_id;
+       txq->queue_id = queue_idx;
        txq->free_thresh = tx_free_thresh;
 
        snprintf(name, sizeof(name),
@@ -284,10 +280,15 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu",
                     name, nb_desc, sizeof(struct hn_txdesc));
 
-       txq->tx_rndis = rte_calloc("hn_txq_rndis", nb_desc,
-                                  HN_RNDIS_PKT_ALIGNED, RTE_CACHE_LINE_SIZE);
-       if (txq->tx_rndis == NULL)
+       txq->tx_rndis_mz = rte_memzone_reserve_aligned(name,
+                       nb_desc * HN_RNDIS_PKT_ALIGNED, rte_socket_id(),
+                       RTE_MEMZONE_IOVA_CONTIG, HN_RNDIS_PKT_ALIGNED);
+       if (!txq->tx_rndis_mz) {
+               err = -rte_errno;
                goto error;
+       }
+       txq->tx_rndis = txq->tx_rndis_mz->addr;
+       txq->tx_rndis_iova = txq->tx_rndis_mz->iova;
 
        txq->txdesc_pool = rte_mempool_create(name, nb_desc,
                                              sizeof(struct hn_txdesc),
@@ -316,7 +317,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 error:
        if (txq->txdesc_pool)
                rte_mempool_free(txq->txdesc_pool);
-       rte_free(txq->tx_rndis);
+       rte_memzone_free(txq->tx_rndis_mz);
        rte_free(txq);
        return err;
 }
@@ -355,9 +356,9 @@ static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)
 }
 
 void
-hn_dev_tx_queue_release(void *arg)
+hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct hn_tx_queue *txq = arg;
+       struct hn_tx_queue *txq = dev->data->tx_queues[qid];
 
        PMD_INIT_FUNC_TRACE();
 
@@ -367,7 +368,7 @@ hn_dev_tx_queue_release(void *arg)
        if (txq->txdesc_pool)
                rte_mempool_free(txq->txdesc_pool);
 
-       rte_free(txq->tx_rndis);
+       rte_memzone_free(txq->tx_rndis_mz);
        rte_free(txq);
 }
 
@@ -569,7 +570,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
         * For large packets, avoid copy if possible but need to keep
         * some space available in receive area for later packets.
         */
-       if (dlen >= HN_RXCOPY_THRESHOLD &&
+       if (hv->rx_extmbuf_enable && dlen > hv->rx_copybreak &&
            (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
                        hv->rxbuf_section_cnt / 2) {
                struct rte_mbuf_ext_shared_info *shinfo;
@@ -1003,9 +1004,9 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
 }
 
 void
-hn_dev_rx_queue_release(void *arg)
+hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct hn_rx_queue *rxq = arg;
+       struct hn_rx_queue *rxq = dev->data->rx_queues[qid];
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1017,9 +1018,9 @@ hn_dev_rx_queue_release(void *arg)
  * For this device that means how many packets are pending in the ring.
  */
 uint32_t
-hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+hn_dev_rx_queue_count(void *rx_queue)
 {
-       struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
+       struct hn_rx_queue *rxq = rx_queue;
 
        return rte_ring_count(rxq->rx_ring);
 }
@@ -1330,7 +1331,7 @@ static void hn_encap(struct rndis_packet_msg *pkt,
                                          NDIS_PKTINFO_TYPE_HASHVAL);
        *pi_data = queue_id;
 
-       if (m->ol_flags & PKT_TX_VLAN_PKT) {
+       if (m->ol_flags & PKT_TX_VLAN) {
                pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
                                                  NDIS_PKTINFO_TYPE_VLAN);
                *pi_data = m->vlan_tci;
@@ -1386,7 +1387,8 @@ static unsigned int hn_get_slots(const struct rte_mbuf *m)
                unsigned int size = rte_pktmbuf_data_len(m);
                unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
 
-               slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+               slots += (offs + size + rte_mem_page_size() - 1) /
+                               rte_mem_page_size();
                m = m->next;
        }
 
@@ -1401,12 +1403,13 @@ static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
 
        while (m) {
                rte_iova_t addr = rte_mbuf_data_iova(m);
-               unsigned int page = addr / PAGE_SIZE;
+               unsigned int page = addr / rte_mem_page_size();
                unsigned int offset = addr & PAGE_MASK;
                unsigned int len = rte_pktmbuf_data_len(m);
 
                while (len > 0) {
-                       unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+                       unsigned int bytes = RTE_MIN(len,
+                                       rte_mem_page_size() - offset);
 
                        sg[segs].page = page;
                        sg[segs].ofs = offset;
@@ -1446,14 +1449,10 @@ static int hn_xmit_sg(struct hn_tx_queue *txq,
        hn_rndis_dump(txd->rndis_pkt);
 
        /* pass IOVA of rndis header in first segment */
-       addr = rte_malloc_virt2iova(txq->tx_rndis);
-       if (unlikely(addr == RTE_BAD_IOVA)) {
-               PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
-               return -EINVAL;
-       }
-       addr = addr + ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
+       addr = txq->tx_rndis_iova +
+               ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
 
-       sg[0].page = addr / PAGE_SIZE;
+       sg[0].page = addr / rte_mem_page_size();
        sg[0].ofs = addr & PAGE_MASK;
        sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
        segs = 1;
@@ -1495,16 +1494,20 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                hn_process_events(hv, txq->queue_id, 0);
 
        /* Transmit over VF if present and up */
-       rte_rwlock_read_lock(&hv->vf_lock);
-       vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->data->dev_started) {
-               void *sub_q = vf_dev->data->tx_queues[queue_id];
-
-               nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+       if (hv->vf_ctx.vf_vsc_switched) {
+               rte_rwlock_read_lock(&hv->vf_lock);
+               vf_dev = hn_get_vf_dev(hv);
+               if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
+                   vf_dev->data->dev_started) {
+                       void *sub_q = vf_dev->data->tx_queues[queue_id];
+
+                       nb_tx = (*vf_dev->tx_pkt_burst)
+                                       (sub_q, tx_pkts, nb_pkts);
+                       rte_rwlock_read_unlock(&hv->vf_lock);
+                       return nb_tx;
+               }
                rte_rwlock_read_unlock(&hv->vf_lock);
-               return nb_tx;
        }
-       rte_rwlock_read_unlock(&hv->vf_lock);
 
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *m = tx_pkts[nb_tx];
@@ -1517,7 +1520,8 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        break;
 
                /* For small packets aggregate them in chimney buffer */
-               if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
+               if (m->pkt_len <= hv->tx_copybreak &&
+                   pkt_size <= txq->agg_szmax) {
                        /* If this packet will not fit, then flush  */
                        if (txq->agg_pktleft == 0 ||
                            RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
@@ -1616,13 +1620,17 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                           (void **)rx_pkts, nb_pkts, NULL);
 
        /* If VF is available, check that as well */
-       rte_rwlock_read_lock(&hv->vf_lock);
-       vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->data->dev_started)
-               nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
-                                    rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+       if (hv->vf_ctx.vf_vsc_switched) {
+               rte_rwlock_read_lock(&hv->vf_lock);
+               vf_dev = hn_get_vf_dev(hv);
+               if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
+                   vf_dev->data->dev_started)
+                       nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
+                                            rx_pkts + nb_rcv,
+                                            nb_pkts - nb_rcv);
 
-       rte_rwlock_read_unlock(&hv->vf_lock);
+               rte_rwlock_read_unlock(&hv->vf_lock);
+       }
        return nb_rcv;
 }
 
@@ -1640,7 +1648,7 @@ hn_dev_free_queues(struct rte_eth_dev *dev)
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               hn_dev_tx_queue_release(dev->data->tx_queues[i]);
+               hn_dev_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
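
Note: the memzone hunks above replace the rte_calloc()-backed RNDIS header area with a single IOVA-contiguous, aligned reservation, which is why hn_xmit_sg() can drop the per-packet rte_malloc_virt2iova() lookup and compute the bus address as txq->tx_rndis_iova plus an offset. A minimal sketch of that pattern follows, assuming a DPDK application with the EAL initialized; RNDIS_SLOT_SIZE, NB_DESC, reserve_rndis_area() and slot_iova() are illustrative placeholders, not identifiers from the driver.

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_memzone.h>

#define RNDIS_SLOT_SIZE	256	/* assumed per-descriptor header size */
#define NB_DESC		512	/* assumed ring depth */

static const struct rte_memzone *mz;

/* Reserve one IOVA-contiguous area for all per-descriptor headers. */
static int
reserve_rndis_area(void)
{
	mz = rte_memzone_reserve_aligned("sketch_rndis",
			NB_DESC * RNDIS_SLOT_SIZE, rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, RNDIS_SLOT_SIZE);
	if (mz == NULL)
		return -rte_errno;
	return 0;
}

/* Bus address of slot i: base IOVA plus the same offset as the
 * virtual address; valid only because the zone is IOVA-contiguous. */
static rte_iova_t
slot_iova(unsigned int i)
{
	return mz->iova + (rte_iova_t)i * RNDIS_SLOT_SIZE;
}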