net/enic: fix calculation of truncated packets
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 15389e5..329559a 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -106,7 +106,7 @@ static void enic_free_wq_buf(struct vnic_wq_buf *buf)
 {
        struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
 
-       rte_mempool_put(mbuf->pool, mbuf);
+       rte_pktmbuf_free_seg(mbuf);
        buf->mb = NULL;
 }
 
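The hunk above matters once transmitted mbufs can be reference-counted (for example, cloned for mirroring or retransmission): rte_mempool_put() returns the raw object to the pool immediately, ignoring the mbuf reference count, while rte_pktmbuf_free_seg() decrements the count and only recycles the segment when it reaches zero. A minimal sketch of the distinction, using only the public mbuf API:

    #include <rte_mbuf.h>

    /* Completion handler sketch: the buffer handed back by the NIC may
     * still be referenced by a clone, so it must go through the mbuf
     * free path rather than straight back to the mempool.
     */
    static void tx_complete(struct rte_mbuf *mbuf)
    {
            /* Wrong for cloned/indirect mbufs: bypasses the refcount. */
            /* rte_mempool_put(mbuf->pool, mbuf); */

            /* Correct: frees the segment only when its reference count
             * drops to zero, and resets it for reuse.
             */
            rte_pktmbuf_free_seg(mbuf);
    }
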
@@ -172,7 +172,8 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
         * which can make ibytes be slightly higher than it should be.
         */
        rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
-       rx_truncated = rx_packet_errors - stats->rx.rx_errors;
+       rx_truncated = rx_packet_errors - stats->rx.rx_errors -
+               stats->rx.rx_no_bufs;
 
        r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
        r_stats->opackets = stats->tx.tx_frames_ok;
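The soft counter rx_packet_errors accumulates every error completion the driver sees, which covers both frames the hardware reports as errored (rx_errors) and frames dropped for lack of buffers (rx_no_bufs); only what remains after subtracting both classes is a genuine truncation. A worked sketch of the corrected arithmetic, with parameter names mirroring the fields in the diff:

    #include <stdint.h>

    /* Truncations are not counted directly by the VIC; they are
     * inferred by removing the error classes the hardware does report
     * from the driver's soft error counter. Before this fix,
     * rx_no_bufs was not subtracted, so no-buffer drops were
     * misreported as truncations and ipackets was undercounted.
     */
    static uint64_t rx_truncated(uint64_t soft_rx_packet_errors,
                                 uint64_t hw_rx_errors,
                                 uint64_t hw_rx_no_bufs)
    {
            return soft_rx_packet_errors - hw_rx_errors - hw_rx_no_bufs;
    }
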
@@ -203,7 +204,7 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
                return;
        }
 
-       err = vnic_dev_del_addr(enic->vdev, mac_addr);
+       err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
        if (err) {
                dev_err(enic, "del mac addr failed\n");
                return;
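Before this hunk, the delete was issued against the caller's new address, which is not yet in the filter, so the delete targets an entry that is not there and the currently programmed address is never removed. A compact sketch of the corrected replace sequence (names are from the diff; error handling elided):

    /* Replace the unicast filter entry: delete what the NIC actually
     * holds (enic->mac_addr), then add the requested address.
     */
    static void replace_mac(struct enic *enic, uint8_t *new_mac)
    {
            vnic_dev_del_addr(enic->vdev, enic->mac_addr); /* programmed */
            vnic_dev_add_addr(enic->vdev, new_mac);        /* requested */
    }
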
@@ -238,19 +239,20 @@ void enic_init_vnic_resources(struct enic *enic)
        struct vnic_rq *data_rq;
 
        for (index = 0; index < enic->rq_count; index++) {
+               cq_idx = enic_cq_rq(enic, enic_sop_rq(index));
+
                vnic_rq_init(&enic->rq[enic_sop_rq(index)],
-                       enic_cq_rq(enic, index),
+                       cq_idx,
                        error_interrupt_enable,
                        error_interrupt_offset);
 
                data_rq = &enic->rq[enic_data_rq(index)];
                if (data_rq->in_use)
                        vnic_rq_init(data_rq,
-                                    enic_cq_rq(enic, index),
+                                    cq_idx,
                                     error_interrupt_enable,
                                     error_interrupt_offset);
 
-               cq_idx = enic_cq_rq(enic, index);
                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
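With Rx scatter, application queue i owns a pair of VIC RQs, a start-of-packet (SOP) RQ and a data RQ, and both must be initialized against the completion queue derived from the SOP RQ index rather than the bare application index; computing cq_idx once, up front, also guarantees the RQ pair and the CQ agree. A sketch of the indexing assumption (the *2 interleaving is illustrative of how the pairs could be laid out, not taken from this diff):

    /* Assumed app-queue -> VIC-queue mapping behind the fix. */
    static unsigned int sop_rq(unsigned int app_q)  { return app_q * 2; }
    static unsigned int data_rq(unsigned int app_q) { return app_q * 2 + 1; }
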
@@ -333,6 +335,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
        dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
                enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
        iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+       iowrite32(0, &rq->ctrl->fetch_index);
        rte_rmb();
 
        return 0;
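Writing posted_index tells the adapter how far software has filled the ring; zeroing fetch_index additionally forces the adapter's read position back to the first slot, so a queue that is stopped and restarted does not resume fetching from a stale offset. A sketch of the publish step, with register names matching the diff and the MMIO write abstracted to a plain volatile store:

    #include <stdint.h>

    /* Publish a freshly filled Rx ring: software's write position
     * first, then reset the NIC's read position to slot 0.
     */
    static void rq_publish(volatile uint32_t *posted_index,
                           volatile uint32_t *fetch_index,
                           uint32_t sw_write_pos)
    {
            *posted_index = sw_write_pos; /* software fill level */
            *fetch_index = 0;             /* NIC restarts reading at 0 */
    }
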
@@ -454,6 +457,8 @@ int enic_enable(struct enic *enic)
        for (index = 0; index < enic->rq_count; index++)
                enic_start_rq(enic, index);
 
+       vnic_dev_add_addr(enic->vdev, enic->mac_addr);
+
        vnic_dev_enable_wait(enic->vdev);
 
        /* Register and enable error interrupt */
@@ -514,30 +519,41 @@ void enic_free_rq(void *rxq)
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
 {
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
        vnic_wq_enable(&enic->wq[queue_idx]);
+       eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 {
-       return vnic_wq_disable(&enic->wq[queue_idx]);
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       int ret;
+
+       ret = vnic_wq_disable(&enic->wq[queue_idx]);
+       if (ret)
+               return ret;
+
+       eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
 }
 
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
        struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
        struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
 
        if (rq_data->in_use)
                vnic_rq_enable(rq_data);
        rte_mb();
        vnic_rq_enable(rq_sop);
-
+       eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
        int ret1 = 0, ret2 = 0;
-
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
        struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
        struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
 
@@ -548,8 +564,11 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
        if (ret2 == 0 && rq_data->in_use)
                ret1 = vnic_rq_disable(rq_data);
 
        if (ret2)
                return ret2;
-       else
+       else if (ret1)
                return ret1;
+
+       eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
 }
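The ethdev layer expects PMDs that support per-queue start/stop to mirror each transition in rx_queue_state[]/tx_queue_state[], and these hunks add that bookkeeping, flipping the state to STOPPED only after the hardware disable actually succeeds. A sketch of the shared pattern (the function-pointer indirection is illustrative; the diff inlines it per queue type):

    #include <rte_ethdev.h>

    /* Stop one Rx queue and record the result for the ethdev layer.
     * On failure the state deliberately stays STARTED.
     */
    static int stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx,
                             int (*hw_disable)(void *), void *hw_queue)
    {
            int ret = hw_disable(hw_queue);

            if (ret)
                    return ret;
            dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
            return 0;
    }
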
 
 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
@@ -899,7 +918,8 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
                return -ENOMEM;
 
        for (i = 0; i < (1 << rss_hash_bits); i++)
-               (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
+               (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
+                       enic_sop_rq(i % enic->rq_count);
 
        err = enic_set_rss_cpu(enic,
                rss_cpu_buf_pa,
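Entries in the RSS indirection table name RQs as the VIC sees them, and with the paired layout only SOP RQ indexes are valid spray targets, so each entry must be translated through enic_sop_rq() instead of using the application queue number directly. A flat-array sketch of the fill (the real table packs four one-byte entries per cpu[] word, which the i/4, i%4 addressing in the diff reflects):

    #include <stdint.h>

    /* Fill an RSS indirection table with SOP RQ indexes, assuming the
     * app-queue -> SOP-RQ translation is q * 2 (illustrative pairing).
     */
    static void fill_rss_table(uint8_t *table, unsigned int entries,
                               unsigned int rq_count)
    {
            for (unsigned int i = 0; i < entries; i++)
                    table[i] = (uint8_t)((i % rq_count) * 2);
    }
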
@@ -969,8 +989,6 @@ int enic_setup_finish(struct enic *enic)
                return -1;
        }
 
-       vnic_dev_add_addr(enic->vdev, enic->mac_addr);
-
        /* Default conf */
        vnic_dev_packet_filter(enic->vdev,
                1 /* directed  */,
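This removal pairs with the addition in enic_enable() above: the unicast MAC filter is now programmed on every device start instead of once at setup, so a stop/start cycle that clears firmware filter state still comes back up with a valid address. A sketch of the resulting start-path ordering (function names from the diff, bodies elided):

    /* Device start: bring up queues, program the MAC, then enable. */
    static void device_start(struct enic *enic)
    {
            /* ... RQs and WQs started ... */
            vnic_dev_add_addr(enic->vdev, enic->mac_addr); /* every start */
            vnic_dev_enable_wait(enic->vdev);
    }
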
@@ -1013,21 +1031,23 @@ int enic_set_vnic_res(struct enic *enic)
        /* With Rx scatter support, two RQs are now used per RQ used by
         * the application.
         */
-       if (enic->rq_count < (eth_dev->data->nb_rx_queues * 2)) {
+       if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
                dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
                        eth_dev->data->nb_rx_queues,
-                       eth_dev->data->nb_rx_queues * 2, enic->rq_count);
+                       eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
                rc = -EINVAL;
        }
-       if (enic->wq_count < eth_dev->data->nb_tx_queues) {
+       if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
                dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
-                       eth_dev->data->nb_tx_queues, enic->wq_count);
+                       eth_dev->data->nb_tx_queues, enic->conf_wq_count);
                rc = -EINVAL;
        }
 
-       if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
+       if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
+                                  eth_dev->data->nb_tx_queues)) {
                dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
-                       enic->rq_count + enic->wq_count, enic->cq_count);
+                       (eth_dev->data->nb_rx_queues +
+                        eth_dev->data->nb_tx_queues), enic->conf_cq_count);
                rc = -EINVAL;
        }
 
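The checks now compare against the raw counts configured on the VIC (conf_*) instead of the driver's working counts (rq_count and friends), which this function itself reassigns, so a later reconfigure could otherwise compare against already-adjusted values. Each application Rx queue always needs its SOP RQ (the data RQ is engaged only when scatter is in use), each Tx queue needs a WQ, and every queue of either kind needs one CQ. A condensed sketch of the arithmetic:

    /* Minimum VIC resources for a requested ethdev configuration. */
    static int resources_ok(unsigned int conf_rq, unsigned int conf_wq,
                            unsigned int conf_cq,
                            unsigned int nb_rx, unsigned int nb_tx)
    {
            return conf_rq >= nb_rx &&          /* SOP RQ per Rx queue */
                   conf_wq >= nb_tx &&          /* WQ per Tx queue */
                   conf_cq >= nb_rx + nb_tx;    /* CQ per queue */
    }
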
@@ -1040,6 +1060,50 @@ int enic_set_vnic_res(struct enic *enic)
        return rc;
 }
 
+/* The Cisco NIC can send and receive packets up to a max packet size
+ * determined by the NIC type and firmware. There is also an MTU
+ * configured into the NIC via the CIMC/UCSM management interface
+ * which can be overridden by this function (up to the max packet size).
+ * Depending on the network setup, doing so may cause packet drops
+ * and unexpected behavior.
+ */
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
+{
+       uint16_t old_mtu;       /* previous setting */
+       uint16_t config_mtu;    /* Value configured into NIC via CIMC/UCSM */
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+       old_mtu = eth_dev->data->mtu;
+       config_mtu = enic->config.mtu;
+
+       /* only works with Rx scatter disabled */
+       if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
+               return -ENOTSUP;
+
+       if (new_mtu > enic->max_mtu) {
+               dev_err(enic,
+                       "MTU not updated: requested (%u) greater than max (%u)\n",
+                       new_mtu, enic->max_mtu);
+               return -EINVAL;
+       }
+       if (new_mtu < ENIC_MIN_MTU) {
+               dev_info(enic,
+                       "MTU not updated: requested (%u) less than min (%u)\n",
+                       new_mtu, ENIC_MIN_MTU);
+               return -EINVAL;
+       }
+       if (new_mtu > config_mtu)
+               dev_warning(enic,
+                       "MTU (%u) is greater than value configured in NIC (%u)\n",
+                       new_mtu, config_mtu);
+
+       /* update the mtu */
+       eth_dev->data->mtu = new_mtu;
+
+       dev_info(enic, "MTU changed from %u to %u\n",  old_mtu, new_mtu);
+       return 0;
+}
+
 static int enic_dev_init(struct enic *enic)
 {
        int err;
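In this version enic_set_mtu() validates the request against the NIC's limits and updates only the software MTU in eth_dev->data, refusing the operation when Rx scatter is enabled. A sketch of how it could back the ethdev MTU op; the wrapper name and private-data lookup are illustrative, not taken from this diff:

    #include <rte_ethdev.h>

    /* ethdev .mtu_set callback sketch delegating to enic_set_mtu(). */
    static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
    {
            struct enic *enic = eth_dev->data->dev_private;

            return enic_set_mtu(enic, mtu);
    }
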
@@ -1079,7 +1143,7 @@ int enic_probe(struct enic *enic)
        struct rte_pci_device *pdev = enic->pdev;
        int err = -1;
 
-       dev_debug(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION);
+       dev_debug(enic, " Initializing ENIC PMD\n");
 
        enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
        enic->bar0.len = pdev->mem_resource[0].len;