examples/ip_frag: fix use of ethdev internal device array
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index f7f193c..5be32b0 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -573,7 +573,7 @@ i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
 {
        txq_info->vsi_id = vsi_id;
        txq_info->queue_id = queue_id;
-       if (queue_id < nb_txq) {
+       if (queue_id < nb_txq && txq) {
                txq_info->ring_len = txq->nb_tx_desc;
                txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
        }
@@ -590,7 +590,7 @@ i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
        rxq_info->vsi_id = vsi_id;
        rxq_info->queue_id = queue_id;
        rxq_info->max_pkt_size = max_pkt_size;
-       if (queue_id < nb_rxq) {
+       if (queue_id < nb_rxq && rxq) {
                rxq_info->ring_len = rxq->nb_rx_desc;
                rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
                rxq_info->databuffer_size =
@@ -623,10 +623,11 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
 
        for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
-                       vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+                       vc_vqci->vsi_id, i, dev->data->nb_tx_queues,
+                       txq ? txq[i] : NULL);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
                        vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
-                                       vf->max_pkt_len, rxq[i]);
+                       vf->max_pkt_len, rxq ? rxq[i] : NULL);
        }
        memset(&args, 0, sizeof(args));
        args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
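
The guards added above make no assumption about why the queue arrays are incomplete: dev->data->tx_queues / rx_queues may be NULL as a whole, or individual slots may be unset, and in either case the fill helpers now leave the ring fields at their zero-initialised values instead of dereferencing a bad pointer. A minimal sketch of the same defensive idiom, with hypothetical local names:

    /* Hedged sketch: index a per-queue array only when both the array
     * and the slot are present, otherwise pass NULL down. */
    struct i40e_tx_queue **txqs =
            (struct i40e_tx_queue **)dev->data->tx_queues;
    struct i40e_tx_queue *txq = (txqs != NULL) ? txqs[i] : NULL;

    i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq, vc_vqci->vsi_id, i,
            dev->data->nb_tx_queues, txq);
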
@@ -1505,12 +1506,12 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 
        /* copy mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
-                                       ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
-                                       0);
+                               RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
+                               0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
                                " store MAC addresses",
-                               ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
+                               RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
                return -ENOMEM;
        }
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
@@ -1767,21 +1768,22 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
         * Check if the jumbo frame and maximum packet length are set correctly
         */
        if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+               if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
                    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                "larger than %u and smaller than %u, as jumbo "
-                               "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+                               "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
                                        (uint32_t)I40E_FRAME_SIZE_MAX);
                        return I40E_ERR_CONFIG;
                }
        } else {
-               if (rxq->max_pkt_len < ETHER_MIN_LEN ||
-                   rxq->max_pkt_len > ETHER_MAX_LEN) {
+               if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+                   rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                "larger than %u and smaller than %u, as jumbo "
-                               "frame is disabled", (uint32_t)ETHER_MIN_LEN,
-                                               (uint32_t)ETHER_MAX_LEN);
+                               "frame is disabled",
+                               (uint32_t)RTE_ETHER_MIN_LEN,
+                               (uint32_t)RTE_ETHER_MAX_LEN);
                        return I40E_ERR_CONFIG;
                }
        }
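
The ETHER_* to RTE_ETHER_* renames throughout this patch are mechanical; the prefixed macros carry the same values as the old unprefixed ones. For reference, a sketch of the relevant definitions as they appear in rte_ether.h (values assumed from the common headers; double-check against your tree):

    #define RTE_ETHER_ADDR_LEN  6     /* Length of an Ethernet (MAC) address. */
    #define RTE_ETHER_MIN_LEN   64    /* Minimum frame length, CRC included. */
    #define RTE_ETHER_MAX_LEN   1518  /* Maximum standard frame length, CRC included. */
    #define RTE_ETHER_MTU       1500  /* MAX_LEN minus 14-byte header and 4-byte CRC. */
    #define RTE_ETHER_MIN_MTU   68    /* Minimum IPv4 MTU, per RFC 791. */
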
@@ -2218,7 +2220,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
        dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
-       dev_info->min_mtu = ETHER_MIN_MTU;
+       dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
        dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
        dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
@@ -2680,7 +2682,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        int ret = 0;
 
        /* check if mtu is within the allowed range */
-       if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+       if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
                return -EINVAL;
 
        /* mtu setting is forbidden if port is start */
@@ -2690,7 +2692,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EBUSY;
        }
 
-       if (frame_size > ETHER_MAX_LEN)
+       if (frame_size > RTE_ETHER_MAX_LEN)
                dev_data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
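
Both checks in i40evf_dev_mtu_set() operate on the full frame size rather than the MTU itself; the driver derives frame_size from the requested MTU plus its layer-2 overhead just above the shown hunk. A hedged sketch of the relationship, assuming the usual I40E_ETH_OVERHEAD definition (Ethernet header + CRC + two VLAN tags):

    /* Assumption: frame_size = mtu + I40E_ETH_OVERHEAD, i.e. 14 + 4 + 2*4 = 26
     * bytes of L2 overhead on top of the MTU. */
    uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;

    /* Accepted range: mtu >= RTE_ETHER_MIN_MTU (68) and
     * frame_size <= I40E_FRAME_SIZE_MAX (9728 for this family).
     * Frames above RTE_ETHER_MAX_LEN (1518) enable DEV_RX_OFFLOAD_JUMBO_FRAME;
     * anything at or below that clears it. */
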
@@ -2807,3 +2809,9 @@ i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
 
        return 0;
 }
+
+bool
+is_i40evf_supported(struct rte_eth_dev *dev)
+{
+       return is_device_supported(dev, &rte_i40evf_pmd);
+}
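
The new helper appears to mirror is_i40e_supported() on the PF side: is_device_supported() checks that the port's bound driver is really this PMD, so i40e VF specific private calls can refuse ports owned by other drivers before touching driver-private data. A hedged usage sketch; the function name is hypothetical and only illustrates the intended guard:

    /* Hypothetical private API entry point guarding itself with the helper. */
    int
    rte_pmd_i40evf_example_op(uint16_t port_id)
    {
            struct rte_eth_dev *dev;

            RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
            dev = &rte_eth_devices[port_id];

            if (!is_i40evf_supported(dev))
                    return -ENOTSUP;  /* port not driven by the i40e VF PMD */

            /* ... safe to use the i40evf private device data here ... */
            return 0;
    }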