{
txq_info->vsi_id = vsi_id;
txq_info->queue_id = queue_id;
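/* txq can be NULL when the queue was never set up, so only read the
* ring fields through a valid pointer (hence the guard below).
*/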
- if (queue_id < nb_txq) {
+ if (queue_id < nb_txq && txq) {
txq_info->ring_len = txq->nb_tx_desc;
txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
}
rxq_info->vsi_id = vsi_id;
rxq_info->queue_id = queue_id;
rxq_info->max_pkt_size = max_pkt_size;
- if (queue_id < nb_rxq) {
+ if (queue_id < nb_rxq && rxq) {
rxq_info->ring_len = rxq->nb_rx_desc;
rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
rxq_info->databuffer_size =
(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
}
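/* nb_qp can exceed the number of configured Rx/Tx queues, and
* dev->data->rx_queues/tx_queues may still be NULL before queue setup;
* that is why the fill helpers take possibly-NULL queue pointers.
*/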
for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
- vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ vc_vqci->vsi_id, i, dev->data->nb_tx_queues,
+ txq ? txq[i] : NULL);
i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
- vf->max_pkt_len, rxq[i]);
+ vf->max_pkt_len, rxq ? rxq[i] : NULL);
}
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
/* copy mac addr */
eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
- 0);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
+ 0);
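/* rte_zmalloc() returns zeroed memory, so unused filter slots start out
* as the all-zero (invalid) MAC address.
*/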
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
" store MAC addresses",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
return -ENOMEM;
}
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
&eth_dev->data->mac_addrs[0]);
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+ "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is disabled", (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ "frame is disabled",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
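/* Net effect: with the jumbo offload on, max_pkt_len must lie in
* (RTE_ETHER_MAX_LEN, I40E_FRAME_SIZE_MAX]; with it off, in
* [RTE_ETHER_MIN_LEN, RTE_ETHER_MAX_LEN].
*/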
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
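/* Assuming the usual i40e definition, I40E_ETH_OVERHEAD covers the
* Ethernet header, CRC and two VLAN tags, which is why max_mtu sits that
* far below max_rx_pktlen.
*/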
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
int ret = 0;
/* check if mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is started */
if (dev_data->dev_started) {
PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev_data->port_id);
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev_data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
}
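/* Caller-side sketch (illustrative, not part of this patch): this is the
* mtu_set devop, so a jumbo MTU set via rte_eth_dev_set_mtu() ends up in
* the branch above and enables the offload:
*
*     if (rte_eth_dev_set_mtu(port_id, 9000) == 0)
*         printf("jumbo frame offload now enabled\n");
*/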
+
+bool
+is_i40evf_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40evf_pmd);
+}
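+
+/* Usage sketch, mirroring how is_i40e_supported() is used in
+ * rte_pmd_i40e.c (illustrative only):
+ *
+ *     RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ *     dev = &rte_eth_devices[port];
+ *     if (!is_i40evf_supported(dev))
+ *         return -ENOTSUP;
+ */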