#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
static const uint64_t map_hena_rss[] = {
/* IPv4 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
- ETH_RSS_NONFRAG_IPV4_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
- ETH_RSS_NONFRAG_IPV4_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
/* IPv6 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
- ETH_RSS_NONFRAG_IPV6_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
- ETH_RSS_NONFRAG_IPV6_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
/* L2 Payload */
- [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+ [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
};
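The table above is indexed by hardware PCTYPE bit position. A minimal sketch (not part of the patch; the loop is illustrative) of how a hena bitmap reported by the PF would be folded into an rss_hf value using this table:

static uint64_t
iavf_hena_to_rss_hf(uint64_t hena)
{
	uint64_t rss_hf = 0;
	uint32_t i;

	/* each bit set in hena selects one PCTYPE entry of map_hena_rss */
	for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
		if (hena & (1ULL << i))
			rss_hf |= map_hena_rss[i];
	}
	return rss_hf;
}

The RTE_BUILD_BUG_ON further down guarantees the table never outgrows the 64-bit hena word, so the bit-per-entry loop is safe.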
- const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV4_OTHER |
- ETH_RSS_FRAG_IPV4;
+ const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+ RTE_ETH_RSS_FRAG_IPV4;
- const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP |
- ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_FRAG_IPV6;
+ const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_FRAG_IPV6;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
}
/**
- * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+ * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
* generalizations of all other IPv4 and IPv6 RSS types.
*/
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
rss_hf |= ipv4_rss;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
rss_hf |= ipv6_rss;
RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
}
if (valid_rss_hf & ipv4_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
if (valid_rss_hf & ipv6_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
if (rss_hf & ~valid_rss_hf)
PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
uint16_t i, j, nb_q;
int ret;
- rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
- nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+ rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
vf->max_rss_qregion);
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
int ret;
- ret = iavf_request_queues(ad, num);
+ ret = iavf_request_queues(dev, num);
if (ret) {
PMD_DRV_LOG(ERR, "request queues from PF failed");
return ret;
return 0;
enable = !!(dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_VLAN_INSERT);
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
iavf_config_vlan_insert_v2(adapter, enable);
return 0;
int err;
err = iavf_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK |
- ETH_QINQ_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_QINQ_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Failed to update vlan offload");
return err;
ad->rx_vec_allowed = true;
ad->tx_vec_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Large VF setting */
if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_dev_data *dev_data = dev->data;
uint16_t buf_size, max_pkt_len;
+ uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
/* Calculate the maximum packet length allowed */
max_pkt_len = RTE_MIN((uint32_t)
rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
-
- /* Check if the jumbo frame and maximum packet length are set
- * correctly.
- */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
- max_pkt_len > IAVF_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is enabled",
- (uint32_t)IAVF_ETH_MAX_LEN,
- (uint32_t)IAVF_FRAME_SIZE_MAX);
- return -EINVAL;
- }
- } else {
- if (max_pkt_len < RTE_ETHER_MIN_LEN ||
- max_pkt_len > IAVF_ETH_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)IAVF_ETH_MAX_LEN);
- return -EINVAL;
- }
+ frame_size);
+
+ /* Check if maximum packet length is set correctly. */
+ if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
+ max_pkt_len > IAVF_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)IAVF_FRAME_SIZE_MAX);
+ return -EINVAL;
}
rxq->max_pkt_len = max_pkt_len;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
rxq->max_pkt_len > buf_size) {
dev_data->scattered_rx = 1;
}
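As a worked example of the scatter decision above, assuming the common RTE_MBUF_DEFAULT_BUF_SIZE of 2176 bytes and RTE_PKTMBUF_HEADROOM of 128:

/* buf_size = 2176 - 128 = 2048 bytes of data room per mbuf; with an
 * MTU of 9000, max_pkt_len = 9000 + IAVF_ETH_OVERHEAD > buf_size,
 * so scattered_rx is set and one packet may span several mbufs.
 */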
return -1;
}
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec =
- rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int), 0);
- if (!intr_handle->intr_vec) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
dev->data->nb_rx_queues);
return -1;
}
}
+
qv_map = rte_zmalloc("qv_map",
dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
if (!qv_map) {
PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
dev->data->nb_rx_queues);
- return -1;
+ goto qv_map_alloc_err;
}
if (!dev->data->dev_conf.intr_conf.rxq ||
*/
vf->msix_base = IAVF_MISC_VEC_ID;
- /* set ITR to max */
+ /* set ITR to default */
interval = iavf_calc_itr_interval(
- IAVF_QUEUE_ITR_INTERVAL_MAX);
+ IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
IAVF_VFINT_DYN_CTL01_INTENA_MASK |
(IAVF_ITR_INDEX_DEFAULT <<
for (i = 0; i < dev->data->nb_rx_queues; i++) {
qv_map[i].queue_id = i;
qv_map[i].vector_id = vf->msix_base;
- intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+ rte_intr_vec_list_index_set(intr_handle,
+ i, IAVF_MISC_VEC_ID);
}
vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
/* If Rx interrupt is required and multiple interrupts are
 * available, queue vectors start from 1
*/
- vf->nb_msix = RTE_MIN(intr_handle->nb_efd,
- (uint16_t)(vf->vf_res->max_vectors - 1));
+ vf->nb_msix =
+ RTE_MIN(rte_intr_nb_efd_get(intr_handle),
+ (uint16_t)(vf->vf_res->max_vectors - 1));
vf->msix_base = IAVF_RX_VEC_START;
vec = IAVF_RX_VEC_START;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
qv_map[i].queue_id = i;
qv_map[i].vector_id = vec;
- intr_handle->intr_vec[i] = vec++;
+ rte_intr_vec_list_index_set(intr_handle,
+ i, vec++);
if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
vec = IAVF_RX_VEC_START;
}
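The interrupt-handle edits in this hunk follow the EAL change that made struct rte_intr_handle opaque. A self-contained sketch of the accessor pattern, using only calls from rte_interrupts.h (the helper name is hypothetical):

#include <rte_interrupts.h>

/* Build a queue-to-vector map without touching handle internals. */
static int
sketch_map_rxq_vectors(struct rte_intr_handle *ih, uint16_t nb_rxq, int base)
{
	uint16_t i;

	if (rte_intr_vec_list_alloc(ih, "intr_vec", nb_rxq))
		return -1;
	for (i = 0; i < nb_rxq; i++)
		rte_intr_vec_list_index_set(ih, i, base + i);
	/* read back with rte_intr_vec_list_index_get(ih, i);
	 * release with rte_intr_vec_list_free(ih)
	 */
	return 0;
}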
if (!vf->lv_enabled) {
if (iavf_config_irq_map(adapter)) {
PMD_DRV_LOG(ERR, "config interrupt mapping failed");
- return -1;
+ goto config_irq_map_err;
}
} else {
uint16_t num_qv_maps = dev->data->nb_rx_queues;
if (iavf_config_irq_map_lv(adapter,
IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
- return -1;
+ goto config_irq_map_err;
}
num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
index += IAVF_IRQ_MAP_NUM_PER_BUF;
if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
- return -1;
+ goto config_irq_map_err;
}
}
return 0;
+
+config_irq_map_err:
+ rte_free(vf->qv_map);
+ vf->qv_map = NULL;
+
+qv_map_alloc_err:
+ rte_intr_vec_list_free(intr_handle);
+
+ return -1;
}
static int
adapter->stopped = 0;
- vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
dev->data->nb_tx_queues);
num_queue_pairs = vf->num_queue_pairs;
}
/* Re-enable the interrupt, because the efd assignment may have changed */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
- rte_intr_disable(intr_handle);
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ rte_intr_disable(intr_handle);
rte_intr_enable(intr_handle);
}
PMD_INIT_FUNC_TRACE();
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
+ dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
if (adapter->stopped == 1)
return 0;
/* Disable the interrupt for Rx */
rte_intr_efd_disable(intr_handle);
/* Free the Rx interrupt vector mapping */
- if (intr_handle->intr_vec) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+ rte_intr_vec_list_free(intr_handle);
/* remove all mac addrs */
iavf_add_del_all_mac_addr(adapter, false);
dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
*/
switch (vf->link_speed) {
case 10:
- new_link.link_speed = ETH_SPEED_NUM_10M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case 100:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case 1000:
- new_link.link_speed = ETH_SPEED_NUM_1G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case 10000:
- new_link.link_speed = ETH_SPEED_NUM_10G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 20000:
- new_link.link_speed = ETH_SPEED_NUM_20G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case 25000:
- new_link.link_speed = ETH_SPEED_NUM_25G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- new_link.link_speed = ETH_SPEED_NUM_40G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- new_link.link_speed = ETH_SPEED_NUM_50G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- new_link.link_speed = ETH_SPEED_NUM_100G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = vf->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(dev, &new_link);
}
bool enable;
int err;
- if (mask & ETH_VLAN_FILTER_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
iavf_iterate_vlan_filters_v2(dev, enable);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
err = iavf_config_vlan_strip_v2(adapter, enable);
/* If not supported, stripping is already disabled by the PF */
return -ENOTSUP;
/* VLAN stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
err = iavf_enable_vlan_strip(adapter);
else
err = iavf_disable_vlan_strip(adapter);
rte_memcpy(lut, vf->rss_lut, reta_size);
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = vf->rss_lut[i];
}
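Each rte_eth_rss_reta_entry64 group covers RTE_ETH_RETA_GROUP_SIZE (64) redirection-table entries, so, for example, RETA entry 70 resolves as:

uint16_t idx   = 70 / RTE_ETH_RETA_GROUP_SIZE;	/* group 1 */
uint16_t shift = 70 % RTE_ETH_RETA_GROUP_SIZE;	/* bit 6 in that group */
/* the entry is touched only if reta_conf[1].mask has bit 6 set */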
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
int ret;
- adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
+ adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -ENOTSUP;
}
static int
-iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
- uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
- int ret = 0;
-
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
- return -EINVAL;
-
/* MTU setting is forbidden while the port is started */
if (dev->data->dev_started) {
PMD_DRV_LOG(ERR, "port must be stopped before configuration");
return -EBUSY;
}
- if (frame_size > IAVF_ETH_MAX_LEN)
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
- return ret;
+ return 0;
}
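With max_rx_pkt_len gone, MTU bounds are validated by the generic ethdev layer against dev_info.min_mtu/max_mtu before this callback runs, so only the running-port case is rejected here. An application-side usage sketch (port_id is hypothetical):

/* rte_eth_dev_set_mtu() range-checks the value first; -EBUSY from the
 * callback above means the port must be stopped before reconfiguring. */
int ret = rte_eth_dev_set_mtu(port_id, 1500);
if (ret == -EBUSY)
	printf("stop port %u before changing the MTU\n", port_id);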
static int
ret = iavf_query_stats(adapter, &pstats);
if (ret == 0) {
uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
RTE_ETHER_CRC_LEN;
iavf_update_stats(vsi, pstats);
stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint16_t msix_intr;
- msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
+ queue_id);
if (msix_intr == IAVF_MISC_VEC_ID) {
PMD_DRV_LOG(INFO, "MISC is also enabled for control");
IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
IAVF_WRITE_FLUSH(hw);
- rte_intr_ack(&pci_dev->intr_handle);
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ rte_intr_ack(pci_dev->intr_handle);
return 0;
}
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
- msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
+ queue_id);
if (msix_intr == IAVF_MISC_VEC_ID) {
PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
return -EIO;
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vf->eth_dev = dev;
+
err = iavf_parse_devargs(dev);
if (err) {
PMD_INIT_LOG(ERR, "Failed to parse devargs");
return -1;
}
+static void
+iavf_uninit_vf(struct rte_eth_dev *dev)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ iavf_shutdown_adminq(hw);
+
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+ vf->vf_res = NULL;
+
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ rte_free(vf->qos_cap);
+ vf->qos_cap = NULL;
+
+ rte_free(vf->rss_lut);
+ vf->rss_lut = NULL;
+ rte_free(vf->rss_key);
+ vf->rss_key = NULL;
+}
+
/* Enable default admin queue interrupt setting */
static inline void
iavf_enable_irq0(struct iavf_hw *hw)
iavf_enable_irq0(hw);
}
+void
+iavf_dev_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ iavf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
+
+ if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
+ iavf_handle_virtchnl_msg(dev);
+ }
+
+ iavf_enable_irq0(hw);
+
+ rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+ iavf_dev_alarm_handler, dev);
+}
+
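The handler re-arms itself, so the admin queue keeps being polled every IAVF_ALARM_INTERVAL microseconds until the alarm is cancelled; the matching EAL calls (also visible in the probe and close hunks below) are:

/* arm: run iavf_dev_alarm_handler(dev) after IAVF_ALARM_INTERVAL us */
rte_eal_alarm_set(IAVF_ALARM_INTERVAL, iavf_dev_alarm_handler, dev);
/* disarm: cancels every pending alarm with this callback/argument pair */
rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);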
static int
iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
int ret = 0;
return 0;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
hw->bus.func = pci_dev->addr.function;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
- adapter->eth_dev = eth_dev;
+ adapter->dev_data = eth_dev->data;
adapter->stopped = 1;
if (iavf_init_vf(eth_dev) != 0) {
}
/* set default ptype table */
- adapter->ptype_tbl = iavf_get_default_ptype_table();
+ iavf_set_default_ptype_table(eth_dev);
/* copy mac addr */
eth_dev->data->mac_addrs = rte_zmalloc(
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
" store MAC addresses",
RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto init_vf_err;
}
/* If the MAC address is not configured by the host,
* generate a random one.
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
- /* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
- iavf_dev_interrupt_handler,
- (void *)eth_dev);
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+ /* register callback func to eal lib */
+ rte_intr_callback_register(pci_dev->intr_handle,
+ iavf_dev_interrupt_handler,
+ (void *)eth_dev);
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
+ /* enable uio intr after callback register */
+ rte_intr_enable(pci_dev->intr_handle);
+ } else {
+ rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+ iavf_dev_alarm_handler, eth_dev);
+ }
/* configure and enable device interrupt */
iavf_enable_irq0(hw);
ret = iavf_flow_init(adapter);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
- return ret;
+ goto flow_init_err;
}
iavf_default_rss_disable(adapter);
return 0;
+
+flow_init_err:
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+init_vf_err:
+ iavf_uninit_vf(eth_dev);
+
+ return ret;
}
static int
{
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
iavf_config_promisc(adapter, false, false);
iavf_shutdown_adminq(hw);
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
- /* unregister callback func from eal lib */
- rte_intr_callback_unregister(intr_handle,
- iavf_dev_interrupt_handler, dev);
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ iavf_dev_interrupt_handler, dev);
+ } else {
+ rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
+ }
iavf_disable_irq0(hw);
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
return ret;
}
-static int
-iavf_drv_i40evf_check_handler(__rte_unused const char *key,
- const char *value, __rte_unused void *opaque)
-{
- if (strcmp(value, "i40evf"))
- return -1;
-
- return 0;
-}
-
-static int
-iavf_drv_i40evf_selected(struct rte_devargs *devargs, uint16_t device_id)
-{
- struct rte_kvargs *kvlist;
- int ret = 0;
-
- if (device_id != IAVF_DEV_ID_VF &&
- device_id != IAVF_DEV_ID_VF_HV &&
- device_id != IAVF_DEV_ID_X722_VF &&
- device_id != IAVF_DEV_ID_X722_A0_VF)
- return 0;
-
- if (devargs == NULL)
- return 0;
-
- kvlist = rte_kvargs_parse(devargs->args, NULL);
- if (kvlist == NULL)
- return 0;
-
- if (!rte_kvargs_count(kvlist, RTE_DEVARGS_KEY_DRIVER))
- goto exit;
-
- /* i40evf driver selected when there's a key-value pair:
- * driver=i40evf
- */
- if (rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_DRIVER,
- iavf_drv_i40evf_check_handler, NULL) < 0)
- goto exit;
-
- ret = 1;
-
-exit:
- rte_kvargs_free(kvlist);
- return ret;
-}
-
static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- if (iavf_dcf_cap_selected(pci_dev->device.devargs) ||
- iavf_drv_i40evf_selected(pci_dev->device.devargs,
- pci_dev->id.device_id))
+ if (iavf_dcf_cap_selected(pci_dev->device.devargs))
return 1;
return rte_eth_dev_pci_generic_probe(pci_dev,
RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
-RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf driver=i40evf");
+RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX