net/netvsc: support integrated VF
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index cb5bc60..f4a3664 100644
@@ -217,6 +217,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
        struct hn_data *hv = dev->data->dev_private;
        struct hn_tx_queue *txq;
        uint32_t tx_free_thresh;
+       int err;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -246,8 +247,14 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        hn_reset_txagg(txq);
 
-       dev->data->tx_queues[queue_idx] = txq;
+       err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc,
+                                    socket_id, tx_conf);
+       if (err) {
+               rte_free(txq);
+               return err;
+       }
 
+       dev->data->tx_queues[queue_idx] = txq;
        return 0;
 }
 
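The error unwind above relies on hn_vf_tx_queue_setup(), which this diff only calls; its definition lives elsewhere (hn_vf.c). A minimal sketch of what such a helper could look like, assuming hv->vf_dev points at the subordinate VF ethdev when one is attached (the hn_vf_rx_queue_setup() call added further down follows the same pattern):

    /* Sketch only -- not part of this diff. Mirrors the synthetic
     * queue on the VF port so queue_idx is valid on both datapaths.
     */
    int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf)
    {
            struct hn_data *hv = dev->data->dev_private;
            struct rte_eth_dev *vf_dev = hv->vf_dev;

            if (!vf_dev)
                    return 0;   /* no VF attached, nothing to mirror */

            return rte_eth_tx_queue_setup(vf_dev->data->port_id,
                                          queue_idx, nb_desc,
                                          socket_id, tx_conf);
    }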
@@ -270,17 +277,6 @@ hn_dev_tx_queue_release(void *arg)
        rte_free(txq);
 }
 
-void
-hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
-                    struct rte_eth_txq_info *qinfo)
-{
-       struct hn_data *hv = dev->data->dev_private;
-       struct hn_tx_queue *txq = dev->data->rx_queues[queue_idx];
-
-       qinfo->conf.tx_free_thresh = txq->free_thresh;
-       qinfo->nb_desc = hv->tx_pool->size;
-}
-
 static void
 hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
                      unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
@@ -534,7 +530,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
        hn_update_packet_stats(&rxq->stats, m);
 
        if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
-               ++rxq->ring_full;
+               ++rxq->stats.ring_full;
                rte_pktmbuf_free(m);
        }
 }
@@ -601,7 +597,7 @@ error:
 }
 
 static void
-hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
+hn_rndis_receive(struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
                 struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
 {
        const struct rndis_msghdr *hdr = buf;
@@ -613,7 +609,7 @@ hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
                break;
 
        case RNDIS_INDICATE_STATUS_MSG:
-               hn_rndis_link_status(rxq->hv, buf);
+               hn_rndis_link_status(dev, buf);
                break;
 
        case RNDIS_INITIALIZE_CMPLT:
@@ -713,6 +709,35 @@ hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
        hn_rx_buf_release(rxb);
 }
 
+/*
+ * Called when NVS inband events are received.
+ * Send a two-part message (the port_id and the NVS message)
+ * over the pipe to the netvsc-vf-event control thread.
+ */
+static void hn_nvs_handle_notify(struct rte_eth_dev *dev,
+                                const struct vmbus_chanpkt_hdr *pkt,
+                                const void *data)
+{
+       const struct hn_nvs_hdr *hdr = data;
+
+       switch (hdr->type) {
+       case NVS_TYPE_TXTBL_NOTE:
+               /* Transmit indirection table has locking problems
+                * in DPDK and is therefore not implemented
+                */
+               PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table");
+               break;
+
+       case NVS_TYPE_VFASSOC_NOTE:
+               hn_nvs_handle_vfassoc(dev, pkt, data);
+               break;
+
+       default:
+               PMD_DRV_LOG(INFO,
+                           "got notify, nvs type %u", hdr->type);
+       }
+}
+
 struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
                                      uint16_t queue_id,
                                      unsigned int socket_id)
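Two things are worth noting about the new handler. First, unlike the removed hn_nvs_handle_notify() (see the deleted hunk further down), it no longer length-checks the inband payload itself; each per-type handler is expected to validate its own message. Second, hn_nvs_handle_vfassoc() is only called here, not defined. A hedged sketch of what it plausibly does; struct hn_nvs_vf_association and the vf_present field are assumptions, not code from this diff:

    /* Hypothetical sketch of the VF-association handler. */
    static void hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
                                      const struct vmbus_chanpkt_hdr *pkt,
                                      const void *data)
    {
            struct hn_data *hv = dev->data->dev_private;
            /* assumed payload type */
            const struct hn_nvs_vf_association *vf_assoc = data;

            /* Per-type length check dropped from the generic path */
            if (unlikely(vmbus_chanpkt_datalen(pkt) < sizeof(*vf_assoc))) {
                    PMD_DRV_LOG(ERR, "invalid vf association NVS");
                    return;
            }

            /* Record whether the host granted or revoked the VF */
            hv->vf_present = vf_assoc->allocated;  /* assumed field */
    }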
@@ -744,13 +769,14 @@ int
 hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
                      uint16_t queue_idx, uint16_t nb_desc,
                      unsigned int socket_id,
-                     const struct rte_eth_rxconf *rx_conf __rte_unused,
+                     const struct rte_eth_rxconf *rx_conf,
                      struct rte_mempool *mp)
 {
        struct hn_data *hv = dev->data->dev_private;
        char ring_name[RTE_RING_NAMESIZE];
        struct hn_rx_queue *rxq;
        unsigned int count;
+       int error = -ENOMEM;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -780,6 +806,11 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (!rxq->rx_ring)
                goto fail;
 
+       error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc,
+                                    socket_id, rx_conf, mp);
+       if (error)
+               goto fail;
+
        dev->data->rx_queues[queue_idx] = rxq;
        return 0;
 
@@ -787,7 +818,7 @@ fail:
        rte_ring_free(rxq->rx_ring);
        rte_free(rxq->event_buf);
        rte_free(rxq);
-       return -ENOMEM;
+       return error;
 }
 
 void
@@ -804,6 +835,9 @@ hn_dev_rx_queue_release(void *arg)
        rxq->rx_ring = NULL;
        rxq->mb_pool = NULL;
 
+       hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);
+
+       /* Keep primary queue to allow for control operations */
        if (rxq != rxq->hv->primary) {
                rte_free(rxq->event_buf);
                rte_free(rxq);
@@ -818,32 +852,6 @@ hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
        return hn_process_events(txq->hv, txq->queue_id, free_cnt);
 }
 
-void
-hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
-                    struct rte_eth_rxq_info *qinfo)
-{
-       struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx];
-
-       qinfo->mp = rxq->mb_pool;
-       qinfo->scattered_rx = 1;
-       qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring);
-}
-
-static void
-hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
-                    const void *data)
-{
-       const struct hn_nvs_hdr *hdr = data;
-
-       if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
-               PMD_DRV_LOG(ERR, "invalid nvs notify");
-               return;
-       }
-
-       PMD_DRV_LOG(INFO,
-                   "got notify, nvs type %u", hdr->type);
-}
-
 /*
  * Process pending events on the channel.
  * Called from both Rx queue poll and Tx cleanup
@@ -916,7 +924,7 @@ retry:
                        break;
 
                case VMBUS_CHANPKT_TYPE_INBAND:
-                       hn_nvs_handle_notify(pkt, data);
+                       hn_nvs_handle_notify(dev, pkt, data);
                        break;
 
                default:
@@ -1007,7 +1015,7 @@ static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
        struct hn_txdesc *txd;
 
        if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
-               ++txq->stats.nomemory;
+               ++txq->stats.ring_full;
                PMD_TX_LOG(DEBUG, "tx pool exhausted!");
                return NULL;
        }
@@ -1275,7 +1283,9 @@ uint16_t
 hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        struct hn_tx_queue *txq = ptxq;
+       uint16_t queue_id = txq->queue_id;
        struct hn_data *hv = txq->hv;
+       struct rte_eth_dev *vf_dev;
        bool need_sig = false;
        uint16_t nb_tx;
        int ret;
@@ -1283,6 +1293,15 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        if (unlikely(hv->closed))
                return 0;
 
+       /* Transmit over VF if present and up */
+       vf_dev = hv->vf_dev;
+       rte_compiler_barrier();
+       if (vf_dev && vf_dev->data->dev_started) {
+               void *sub_q = vf_dev->data->tx_queues[queue_id];
+
+               return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+       }
+
        if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
                hn_process_events(hv, txq->queue_id, 0);
 
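The hot path above takes one snapshot of hv->vf_dev into a local and separates it from the dev_started test with rte_compiler_barrier(), so the compiler cannot re-read the pointer between the NULL check and the dereference. For that to be safe, the writer must fully initialize the VF ethdev before publishing the pointer. A sketch of the matching writer side, assuming publication happens in hn_vf.c (names here are illustrative):

    /* Illustrative publication sketch, not from this diff. */
    static void hn_vf_publish(struct hn_data *hv,
                              struct rte_eth_dev *vf_dev)
    {
            /* ... vf_dev fully configured and started above ... */
            rte_smp_wmb();          /* order VF init before the store */
            hv->vf_dev = vf_dev;    /* aligned pointer store is atomic */
    }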
@@ -1304,7 +1323,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        if (unlikely(!pkt))
                                break;
 
-                       hn_encap(pkt, txq->queue_id, m);
+                       hn_encap(pkt, queue_id, m);
                        hn_append_to_chim(txq, pkt, m);
 
                        rte_pktmbuf_free(m);
@@ -1331,7 +1350,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        txd->data_size += m->pkt_len;
                        ++txd->packets;
 
-                       hn_encap(pkt, txq->queue_id, m);
+                       hn_encap(pkt, queue_id, m);
 
                        ret = hn_xmit_sg(txq, txd, m, &need_sig);
                        if (unlikely(ret != 0)) {
@@ -1360,15 +1379,36 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
        struct hn_rx_queue *rxq = prxq;
        struct hn_data *hv = rxq->hv;
+       struct rte_eth_dev *vf_dev;
+       uint16_t nb_rcv;
 
        if (unlikely(hv->closed))
                return 0;
 
-       /* If ring is empty then process more */
-       if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+       vf_dev = hv->vf_dev;
+       rte_compiler_barrier();
+
+       if (vf_dev && vf_dev->data->dev_started) {
+               /* Normally, with SR-IOV the ring buffer will be empty */
                hn_process_events(hv, rxq->queue_id, 0);
 
-       /* Get mbufs off staging ring */
-       return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
-                                        nb_pkts, NULL);
+               /* Get some mbufs off of the staging ring */
+               nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
+                                                  (void **)rx_pkts,
+                                                  nb_pkts / 2, NULL);
+               /* And the rest from the VF */
+               nb_rcv += rte_eth_rx_burst(vf_dev->data->port_id,
+                                          rxq->queue_id,
+                                          rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+       } else {
+               /* If not enough packets are staged, process more events */
+               if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+                       hn_process_events(hv, rxq->queue_id, 0);
+
+               nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
+                                                  (void **)rx_pkts,
+                                                  nb_pkts, NULL);
+       }
+
+       return nb_rcv;
 }
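With a VF up, each burst is split: at most nb_pkts / 2 mbufs are taken from the synthetic staging ring, and rte_eth_rx_burst() on the VF queue fills the remainder. For example, with nb_pkts = 32 and 10 packets staged, up to 16 are requested from the ring, 10 arrive, and the VF may supply the remaining 22; the apparent intent is to keep slow-path synthetic traffic from monopolizing the burst once SR-IOV carries the bulk of the load. None of this is visible to the application; a plain rx loop on the synthetic port drains both sources:

    /* Caller's view: one burst on the netvsc port merges both paths.
     * port_id refers to the synthetic (netvsc) port.
     */
    struct rte_mbuf *pkts[32];
    uint16_t nb, i;

    nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
    for (i = 0; i < nb; i++) {
            /* ... process pkts[i] ... */
            rte_pktmbuf_free(pkts[i]);
    }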