net/bnxt: support Tx descriptor status
[dpdk.git] / drivers / net / bnxt / bnxt_ethdev.c
index c9d1122..97ddca0 100644 (file)
@@ -360,6 +360,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 {
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        uint16_t max_vnics, i, j, vpool, vrxq;
+       unsigned int max_rx_rings;
 
        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
@@ -370,9 +371,14 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
        /* PF/VF specifics */
        if (BNXT_PF(bp))
                dev_info->max_vfs = bp->pdev->max_vfs;
-       dev_info->max_rx_queues = bp->max_rx_rings;
-       dev_info->max_tx_queues = bp->max_tx_rings;
+       max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
+                                               RTE_MIN(bp->max_rsscos_ctx,
+                                               bp->max_stat_ctx)));
+       /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+       dev_info->max_rx_queues = max_rx_rings;
+       dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bp->max_rsscos_ctx;
+       dev_info->hash_key_size = 40;
        max_vnics = bp->max_vnics;
 
        /* Fast path specifics */
@@ -827,11 +833,15 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
         */
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                if (!rss_conf->rss_hf)
-                       return -EINVAL;
+                       RTE_LOG(ERR, PMD, "Hash type NONE\n");
        } else {
                if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
                        return -EINVAL;
        }
+
+       bp->flags |= BNXT_FLAG_UPDATE_HASH;
+       memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
+
        if (rss_conf->rss_hf & ETH_RSS_IPV4)
                hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
        if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
@@ -1517,6 +1527,126 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev)
        return bnxt_hwrm_port_led_cfg(bp, false);
 }
 
+/*
+ * rx_queue_count handler: count completed Rx descriptors (packets received
+ * by HW but not yet retrieved by the application) on queue @rx_queue_id.
+ * Scans up to nb_rx_desc entries of the queue's completion ring, starting
+ * from raw index 0, and returns the number of packet completions seen.
+ */
+static uint32_t
+bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       uint32_t desc = 0, raw_cons = 0, cons;
+       struct bnxt_cp_ring_info *cpr;
+       struct bnxt_rx_queue *rxq;
+       struct rx_pkt_cmpl *rxcmp;
+       uint16_t cmp_type;
+       uint8_t cmp = 1;
+       bool valid;
+
+       rxq = dev->data->rx_queues[rx_queue_id];
+       cpr = rxq->cp_ring;
+       /* Snapshot of the expected valid-bit phase for the current pass. */
+       valid = cpr->valid;
+
+       while (raw_cons < rxq->nb_rx_desc) {
+               cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+               rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+               /* Stop counting this entry once HW has not written it yet. */
+               if (!CMPL_VALID(rxcmp, valid))
+                       goto nothing_to_do;
+               /* Toggle the expected valid bit when the ring wraps. */
+               valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
+               cmp_type = CMP_TYPE(rxcmp);
+               if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+                       /*
+                        * TPA end completion: the number of aggregation
+                        * buffers consumed is encoded in agg_bufs_v1.
+                        */
+                       cmp = (rte_le_to_cpu_32(
+                                       ((struct rx_tpa_end_cmpl *)
+                                        (rxcmp))->agg_bufs_v1) &
+                              RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
+                               RX_TPA_END_CMPL_AGG_BUFS_SFT;
+                       desc++;
+               } else if (cmp_type == 0x11) {
+                       /*
+                        * NOTE(review): 0x11 is a magic number — presumably
+                        * RX_PKT_CMPL_TYPE_RX_L2; use the named constant.
+                        */
+                       desc++;
+                       cmp = (rxcmp->agg_bufs_v1 &
+                                  RX_PKT_CMPL_AGG_BUFS_MASK) >>
+                               RX_PKT_CMPL_AGG_BUFS_SFT;
+               } else {
+                       /* Non-packet completion: occupies a single entry. */
+                       cmp = 1;
+               }
+nothing_to_do:
+               /*
+                * NOTE(review): on the !CMPL_VALID path 'cmp' still holds the
+                * previous iteration's count (initially 1), and the 0-case
+                * advances by 2 — confirm both are intentional.
+                */
+               raw_cons += cmp ? cmp : 2;
+       }
+
+       return desc;
+}
+
+/*
+ * rx_descriptor_status handler: report the state of the Rx descriptor at
+ * position @offset of the given queue.
+ *
+ * Returns:
+ *   RTE_ETH_RX_DESC_DONE    - a valid completion is present (packet ready),
+ *   RTE_ETH_RX_DESC_UNAVAIL - no mbuf is posted at that slot,
+ *   RTE_ETH_RX_DESC_AVAIL   - slot has a buffer posted, awaiting HW,
+ *   -EINVAL                 - NULL queue or offset out of range.
+ */
+static int
+bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
+{
+       struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+       struct bnxt_rx_ring_info *rxr;
+       struct bnxt_cp_ring_info *cpr;
+       struct bnxt_sw_rx_bd *rx_buf;
+       struct rx_pkt_cmpl *rxcmp;
+       uint32_t cons, cp_cons;
+
+       if (!rxq)
+               return -EINVAL;
+
+       cpr = rxq->cp_ring;
+       rxr = rxq->rx_ring;
+
+       if (offset >= rxq->nb_rx_desc)
+               return -EINVAL;
+
+       /* Map the caller-supplied offset onto the completion ring. */
+       cons = RING_CMP(cpr->cp_ring_struct, offset);
+       cp_cons = cpr->cp_raw_cons;
+       rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+       /*
+        * The valid bit alternates each time the completion ring wraps, so
+        * the expected phase depends on whether the probed entry is ahead of
+        * or behind the current raw consumer index.
+        */
+       if (cons > cp_cons) {
+               if (CMPL_VALID(rxcmp, cpr->valid))
+                       return RTE_ETH_RX_DESC_DONE;
+       } else {
+               if (CMPL_VALID(rxcmp, !cpr->valid))
+                       return RTE_ETH_RX_DESC_DONE;
+       }
+       /* No completion yet: distinguish "buffer posted" from "empty slot". */
+       rx_buf = &rxr->rx_buf_ring[cons];
+       if (rx_buf->mbuf == NULL)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+/*
+ * tx_descriptor_status handler: report the state of the Tx descriptor at
+ * position @offset of the given queue.
+ *
+ * Returns:
+ *   RTE_ETH_TX_DESC_UNAVAIL - a valid completion occupies the probed entry,
+ *   RTE_ETH_TX_DESC_DONE    - slot has no mbuf (already transmitted/freed),
+ *   RTE_ETH_TX_DESC_FULL    - slot still holds an mbuf awaiting completion,
+ *   -EINVAL                 - NULL queue or offset out of range.
+ */
+static int
+bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
+{
+       struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+       struct bnxt_tx_ring_info *txr;
+       struct bnxt_cp_ring_info *cpr;
+       struct bnxt_sw_tx_bd *tx_buf;
+       struct tx_pkt_cmpl *txcmp;
+       uint32_t cons, cp_cons;
+
+       if (!txq)
+               return -EINVAL;
+
+       cpr = txq->cp_ring;
+       txr = txq->tx_ring;
+
+       if (offset >= txq->nb_tx_desc)
+               return -EINVAL;
+
+       /* Map the caller-supplied offset onto the completion ring. */
+       cons = RING_CMP(cpr->cp_ring_struct, offset);
+       txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+       cp_cons = cpr->cp_raw_cons;
+
+       /*
+        * The valid bit alternates each ring wrap; pick the expected phase
+        * based on the probed entry's position relative to the raw consumer.
+        */
+       if (cons > cp_cons) {
+               if (CMPL_VALID(txcmp, cpr->valid))
+                       return RTE_ETH_TX_DESC_UNAVAIL;
+       } else {
+               if (CMPL_VALID(txcmp, !cpr->valid))
+                       return RTE_ETH_TX_DESC_UNAVAIL;
+       }
+       /* No completion at this entry: inspect the SW Tx buffer ring. */
+       tx_buf = &txr->tx_buf_ring[cons];
+       if (tx_buf->mbuf == NULL)
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
+
 /*
  * Initialization
  */
@@ -1564,6 +1694,11 @@ static const struct eth_dev_ops bnxt_dev_ops = {
        .txq_info_get = bnxt_txq_info_get_op,
        .dev_led_on = bnxt_dev_led_on_op,
        .dev_led_off = bnxt_dev_led_off_op,
+       .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
+       .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
+       .rx_queue_count = bnxt_rx_queue_count_op,
+       .rx_descriptor_status = bnxt_rx_descriptor_status_op,
+       .tx_descriptor_status = bnxt_tx_descriptor_status_op,
 };
 
 static bool bnxt_vf_pciid(uint16_t id)
@@ -1643,6 +1778,9 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
        rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
        bp->dev_stopped = 1;
 
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               goto skip_init;
+
        if (bnxt_vf_pciid(pci_dev->id.device_id))
                bp->flags |= BNXT_FLAG_VF;
 
@@ -1652,7 +1790,10 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
                        "Board initialization failed rc: %x\n", rc);
                goto error;
        }
+skip_init:
        eth_dev->dev_ops = &bnxt_dev_ops;
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
        eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
 
@@ -1877,6 +2018,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
        struct bnxt *bp = eth_dev->data->dev_private;
        int rc;
 
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return -EPERM;
+
        bnxt_disable_int(bp);
        bnxt_free_int(bp);
        bnxt_free_mem(bp);