#include "iavf_rxtx.h"
#include "iavf_generic_flow.h"
#include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
/* devargs */
#define IAVF_PROTO_XTR_ARG "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG "quanta_size"
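+/* quanta_size devarg, e.g. "quanta_size=1024" (the default); the accepted
+ * range and alignment are validated in iavf_parse_devargs() below
+ */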
+
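+/* Dynamic mbuf field/flag carrying Rx timestamps; the field offset is
+ * resolved at runtime via rte_mbuf_dyn_rx_timestamp_register()
+ */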
+uint64_t iavf_timestamp_dynflag;
+int iavf_timestamp_dynfield_offset = -1;
static const char * const iavf_valid_args[] = {
IAVF_PROTO_XTR_ARG,
+ IAVF_QUANTA_SIZE_ARG,
NULL
};
[IAVF_PROTO_XTR_IP_OFFSET] = {
.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+ [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+ .param = {
+ .name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+ .ol_flag =
+ &rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
};
static int iavf_dev_configure(struct rte_eth_dev *dev);
static int iavf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n);
static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
unsigned int offset;
};
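+/* Byte offset of a member within struct iavf_eth_xstats, which combines
+ * the virtchnl eth stats with the inline-IPsec crypto counters
+ */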
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
- {"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
- {"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
- {"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
- {"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
- {"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+ {"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+ {"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+ {"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+ {"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+ {"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
rx_unknown_protocol)},
- {"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
- {"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
- {"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
- {"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
- {"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
- {"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+ {"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+ {"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+ {"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+ {"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+ {"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+ {"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+ {"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+ {"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+ {"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+ {"inline_ipsec_crypto_ierrors_sad_lookup",
+ _OFF_OF(ips_stats.ierrors.sad_miss)},
+ {"inline_ipsec_crypto_ierrors_not_processed",
+ _OFF_OF(ips_stats.ierrors.not_processed)},
+ {"inline_ipsec_crypto_ierrors_icv_fail",
+ _OFF_OF(ips_stats.ierrors.icv_check)},
+ {"inline_ipsec_crypto_ierrors_length",
+ _OFF_OF(ips_stats.ierrors.ipsec_length)},
+ {"inline_ipsec_crypto_ierrors_misc",
+ _OFF_OF(ips_stats.ierrors.misc)},
};
+#undef _OFF_OF
#define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
sizeof(rte_iavf_stats_strings[0]))
.stats_reset = iavf_dev_stats_reset,
.xstats_get = iavf_dev_xstats_get,
.xstats_get_names = iavf_dev_xstats_get_names,
- .xstats_reset = iavf_dev_stats_reset,
+ .xstats_reset = iavf_dev_xstats_reset,
.promiscuous_enable = iavf_dev_promiscuous_enable,
.promiscuous_disable = iavf_dev_promiscuous_disable,
.allmulticast_enable = iavf_dev_allmulticast_enable,
return 0;
}
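+
+/* Poll VFGEN_RSTAT and report whether a VF reset (VFR) is in progress. */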
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+ int inprogress = 0;
+
+ if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+ VIRTCHNL_VFR_INPROGRESS)
+ inprogress = 1;
+
+ if (inprogress)
+ PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+ return inprogress;
+}
+
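+/* Periodic alarm callback: while a VF reset is pending, poll for VFR
+ * completion; otherwise watch for a VFLR event, raise
+ * RTE_ETH_EVENT_INTR_RESET toward the application, and re-arm.
+ */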
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+ struct iavf_adapter *adapter = cb_arg;
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ int vfr_inprogress = 0, rc = 0;
+
+ /* check if watchdog has been disabled since last call */
+ if (!adapter->vf.watchdog_enabled)
+ return;
+
+ /* If in reset then poll vfr_inprogress register for completion */
+ if (adapter->vf.vf_reset) {
+ vfr_inprogress = iavf_vfr_inprogress(hw);
+
+ if (!vfr_inprogress) {
+ PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+ adapter->vf.eth_dev->data->name);
+ adapter->vf.vf_reset = false;
+ }
+ /* If not in reset then poll vfr_inprogress register for VFLR event */
+ } else {
+ vfr_inprogress = iavf_vfr_inprogress(hw);
+
+ if (vfr_inprogress) {
+ PMD_DRV_LOG(INFO,
+ "VF \"%s\" reset event detected by watchdog",
+ adapter->vf.eth_dev->data->name);
+
+ /* enter reset state with VFLR event */
+ adapter->vf.vf_reset = true;
+
+ rte_eth_dev_callback_process(adapter->vf.eth_dev,
+ RTE_ETH_EVENT_INTR_RESET, NULL);
+ }
+ }
+
+ /* re-alarm watchdog */
+ rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+ &iavf_dev_watchdog, cb_arg);
+
+ if (rc)
+ PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for VF \"%s\"",
+ adapter->vf.eth_dev->data->name);
+}
+
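+/* The watchdog is compiled out when IAVF_DEV_WATCHDOG_PERIOD is 0; the
+ * __rte_unused markers above keep the build warning-free in that case.
+ */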
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+ PMD_DRV_LOG(INFO, "Enabling device watchdog");
+ adapter->vf.watchdog_enabled = true;
+ if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+ &iavf_dev_watchdog, (void *)adapter))
+ PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
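+/* Disabling only clears the flag; a pending alarm fires once more and
+ * exits without re-arming (see the check at the top of the callback).
+ */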
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+ PMD_DRV_LOG(INFO, "Disabling device watchdog");
+ adapter->vf.watchdog_enabled = false;
+#endif
+}
+
static int
iavf_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addrs,
static const uint64_t map_hena_rss[] = {
/* IPv4 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
- ETH_RSS_NONFRAG_IPV4_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
- ETH_RSS_NONFRAG_IPV4_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
/* IPv6 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
- ETH_RSS_NONFRAG_IPV6_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
- ETH_RSS_NONFRAG_IPV6_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
/* L2 Payload */
- [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+ [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
};
- const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV4_OTHER |
- ETH_RSS_FRAG_IPV4;
+ const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+ RTE_ETH_RSS_FRAG_IPV4;
- const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP |
- ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_FRAG_IPV6;
+ const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_FRAG_IPV6;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
}
/**
- * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+ * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
* generalizations of all other IPv4 and IPv6 RSS types.
*/
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
rss_hf |= ipv4_rss;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
rss_hf |= ipv6_rss;
RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
}
if (valid_rss_hf & ipv4_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
if (valid_rss_hf & ipv6_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
if (rss_hf & ~valid_rss_hf)
PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
uint16_t i, j, nb_q;
int ret;
- rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
- nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+ rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
vf->max_rss_qregion);
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
j = 0;
vf->rss_lut[i] = j;
}
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure RSS */
ret = iavf_configure_rss_lut(adapter);
if (ret)
return ret;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
int ret;
- ret = iavf_request_queues(ad, num);
+ ret = iavf_request_queues(dev, num);
if (ret) {
PMD_DRV_LOG(ERR, "request queues from PF failed");
return ret;
return 0;
enable = !!(dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_VLAN_INSERT);
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
iavf_config_vlan_insert_v2(adapter, enable);
return 0;
int err;
err = iavf_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK |
- ETH_QINQ_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_QINQ_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Failed to update vlan offload");
return err;
ad->rx_vec_allowed = true;
ad->tx_vec_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Large VF setting */
if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_dev_data *dev_data = dev->data;
uint16_t buf_size, max_pkt_len;
+ uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
+ enum iavf_status err;
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
/* Calculate the maximum packet length allowed */
max_pkt_len = RTE_MIN((uint32_t)
rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ frame_size);
+
+ /* Check if maximum packet length is set correctly. */
+ if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
+ max_pkt_len > IAVF_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)IAVF_FRAME_SIZE_MAX);
+ return -EINVAL;
+ }
- /* Check if the jumbo frame and maximum packet length are set
- * correctly.
- */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
- max_pkt_len > IAVF_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is enabled",
- (uint32_t)IAVF_ETH_MAX_LEN,
- (uint32_t)IAVF_FRAME_SIZE_MAX);
- return -EINVAL;
- }
- } else {
- if (max_pkt_len < RTE_ETHER_MIN_LEN ||
- max_pkt_len > IAVF_ETH_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)IAVF_ETH_MAX_LEN);
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ /* Register mbuf field and flag for Rx timestamp */
+ err = rte_mbuf_dyn_rx_timestamp_register(
+ &iavf_timestamp_dynfield_offset,
+ &iavf_timestamp_dynflag);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Cannot register mbuf field/flag for timestamp");
return -EINVAL;
}
}
rxq->max_pkt_len = max_pkt_len;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
rxq->max_pkt_len > buf_size) {
dev_data->scattered_rx = 1;
}
return -1;
}
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec =
- rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int), 0);
- if (!intr_handle->intr_vec) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
dev->data->nb_rx_queues);
return -1;
}
}
+
qv_map = rte_zmalloc("qv_map",
dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
if (!qv_map) {
PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
dev->data->nb_rx_queues);
- return -1;
+ goto qv_map_alloc_err;
}
if (!dev->data->dev_conf.intr_conf.rxq ||
for (i = 0; i < dev->data->nb_rx_queues; i++) {
qv_map[i].queue_id = i;
qv_map[i].vector_id = vf->msix_base;
- intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+ rte_intr_vec_list_index_set(intr_handle,
+ i, IAVF_MISC_VEC_ID);
}
vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
"vector %u is mapped to all Rx queues",
vf->msix_base);
} else {
- /* If Rx interrupt is reuquired, and we can use
+ /* If Rx interrupt is required, and we can use
* multi interrupts, then the vec is from 1
*/
- vf->nb_msix = RTE_MIN(intr_handle->nb_efd,
- (uint16_t)(vf->vf_res->max_vectors - 1));
+ vf->nb_msix =
+ RTE_MIN(rte_intr_nb_efd_get(intr_handle),
+ (uint16_t)(vf->vf_res->max_vectors - 1));
vf->msix_base = IAVF_RX_VEC_START;
vec = IAVF_RX_VEC_START;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
qv_map[i].queue_id = i;
qv_map[i].vector_id = vec;
- intr_handle->intr_vec[i] = vec++;
+ rte_intr_vec_list_index_set(intr_handle,
+ i, vec++);
if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
vec = IAVF_RX_VEC_START;
}
if (!vf->lv_enabled) {
if (iavf_config_irq_map(adapter)) {
PMD_DRV_LOG(ERR, "config interrupt mapping failed");
- return -1;
+ goto config_irq_map_err;
}
} else {
uint16_t num_qv_maps = dev->data->nb_rx_queues;
if (iavf_config_irq_map_lv(adapter,
IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
- return -1;
+ goto config_irq_map_err;
}
num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
index += IAVF_IRQ_MAP_NUM_PER_BUF;
if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
- return -1;
+ goto config_irq_map_err;
}
}
return 0;
+
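+/* error unwind: release resources in reverse order of allocation */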
+config_irq_map_err:
+ rte_free(vf->qv_map);
+ vf->qv_map = NULL;
+
+qv_map_alloc_err:
+ rte_intr_vec_list_free(intr_handle);
+
+ return -1;
}
static int
adapter->stopped = 0;
- vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
dev->data->nb_tx_queues);
num_queue_pairs = vf->num_queue_pairs;
return -1;
}
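+ /* Query PTP capabilities up front; the Rx timestamp offload is only
+ * usable when VIRTCHNL_VF_CAP_PTP is present.
+ */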
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
+ if (iavf_get_ptp_cap(adapter)) {
+ PMD_INIT_LOG(ERR, "Failed to get ptp capability");
+ return -1;
+ }
+ }
+
if (iavf_init_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "failed to do Queue init");
return -1;
}
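+ /* Quanta size tuning is best effort: log a warning and continue */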
+ if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
+ PMD_DRV_LOG(WARNING, "configure quanta size failed");
+
/* If needed, send configure queues msg multiple times to make the
* adminq buffer length smaller than the 4K limitation.
*/
/* Disable the interrupt for Rx */
rte_intr_efd_disable(intr_handle);
/* Rx interrupt vector mapping free */
- if (intr_handle->intr_vec) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+ rte_intr_vec_list_free(intr_handle);
/* remove all mac addrs */
iavf_add_del_all_mac_addr(adapter, false);
iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
false);
+ /* free the iAVF security device context and all related resources */
+ iavf_security_ctx_destroy(adapter);
+
adapter->stopped = 1;
dev->data->dev_started = 0;
static int
iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = &adapter->vf;
dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->reta_size = vf->vf_res->rss_lut_size;
dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+ if (iavf_ipsec_crypto_supported(adapter)) {
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ }
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
*/
switch (vf->link_speed) {
case 10:
- new_link.link_speed = ETH_SPEED_NUM_10M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case 100:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case 1000:
- new_link.link_speed = ETH_SPEED_NUM_1G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case 10000:
- new_link.link_speed = ETH_SPEED_NUM_10G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 20000:
- new_link.link_speed = ETH_SPEED_NUM_20G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case 25000:
- new_link.link_speed = ETH_SPEED_NUM_25G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- new_link.link_speed = ETH_SPEED_NUM_40G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- new_link.link_speed = ETH_SPEED_NUM_50G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- new_link.link_speed = ETH_SPEED_NUM_100G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = vf->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(dev, &new_link);
}
bool enable;
int err;
- if (mask & ETH_VLAN_FILTER_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
iavf_iterate_vlan_filters_v2(dev, enable);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
err = iavf_config_vlan_strip_v2(adapter, enable);
/* If not support, the stripping is already disabled by PF */
return -ENOTSUP;
/* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
err = iavf_enable_vlan_strip(adapter);
else
err = iavf_disable_vlan_strip(adapter);
rte_memcpy(lut, vf->rss_lut, reta_size);
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
rte_memcpy(vf->rss_lut, lut, reta_size);
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure RSS */
ret = iavf_configure_rss_lut(adapter);
if (ret) /* revert back */
rte_memcpy(vf->rss_lut, lut, reta_size);
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = vf->rss_lut[i];
}
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
int ret;
- adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
+ adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -ENOTSUP;
}
static int
-iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
- uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
- int ret = 0;
-
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
- return -EINVAL;
-
/* MTU setting is forbidden while the port is started */
if (dev->data->dev_started) {
PMD_DRV_LOG(ERR, "port must be stopped before configuration");
return -EBUSY;
}
- if (frame_size > IAVF_ETH_MAX_LEN)
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
- return ret;
+ return 0;
}
static int
static void
iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
{
- struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+ struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
ret = iavf_query_stats(adapter, &pstats);
if (ret == 0) {
uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
RTE_ETHER_CRC_LEN;
iavf_update_stats(vsi, pstats);
stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
return ret;
/* set stats offset base on current values */
- vsi->eth_stats_offset = *pstats;
+ vsi->eth_stats_offset.eth_stats = *pstats;
+
+ return 0;
+}
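+
+/* xstats reset clears both the standard stats offsets and the
+ * accumulated inline-IPsec crypto counters.
+ */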
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ iavf_dev_stats_reset(dev);
+ memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
+ sizeof(struct iavf_ipsec_crypto_stats));
return 0;
}
return IAVF_NB_XSTATS;
}
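+
+/* Aggregate the per-Rx-queue inline-IPsec crypto counters into a single
+ * device-wide set for xstats reporting.
+ */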
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+ struct iavf_ipsec_crypto_stats *ips)
+{
+ uint16_t idx;
+ for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+ struct iavf_rx_queue *rxq;
+ struct iavf_ipsec_crypto_stats *stats;
+ rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+ stats = &rxq->stats.ipsec_crypto;
+ ips->icount += stats->icount;
+ ips->ibytes += stats->ibytes;
+ ips->ierrors.count += stats->ierrors.count;
+ ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+ ips->ierrors.not_processed += stats->ierrors.not_processed;
+ ips->ierrors.icv_check += stats->ierrors.icv_check;
+ ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+ ips->ierrors.misc += stats->ierrors.misc;
+ }
+}
+
static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_vsi *vsi = &vf->vsi;
struct virtchnl_eth_stats *pstats = NULL;
+ struct iavf_eth_xstats iavf_xstats = {{0}};
if (n < IAVF_NB_XSTATS)
return IAVF_NB_XSTATS;
return 0;
iavf_update_stats(vsi, pstats);
+ iavf_xstats.eth_stats = *pstats;
+
+ if (iavf_ipsec_crypto_supported(adapter))
+ iavf_dev_update_ipsec_xstats(dev, &iavf_xstats.ips_stats);
/* loop over xstats array and values from pstats */
for (i = 0; i < IAVF_NB_XSTATS; i++) {
xstats[i].id = i;
- xstats[i].value = *(uint64_t *)(((char *)pstats) +
+ xstats[i].value = *(uint64_t *)(((char *)&iavf_xstats) +
rte_iavf_stats_strings[i].offset);
}
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint16_t msix_intr;
- msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
+ queue_id);
if (msix_intr == IAVF_MISC_VEC_ID) {
PMD_DRV_LOG(INFO, "MISC is also enabled for control");
IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
IAVF_WRITE_FLUSH(hw);
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
- rte_intr_ack(&pci_dev->intr_handle);
+ rte_intr_ack(pci_dev->intr_handle);
return 0;
}
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
- msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
+ queue_id);
if (msix_intr == IAVF_MISC_VEC_ID) {
PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
return -EIO;
{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
{ "tcp", IAVF_PROTO_XTR_TCP },
{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+ { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
};
uint32_t i;
return xtr_type_map[i].type;
}
- PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
- "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+ PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+ "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
return -1;
}
return 0;
}
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+ u16 *num = (u16 *)args;
+ unsigned long long tmp;
+
+ errno = 0;
+ tmp = strtoull(value, NULL, 10);
+ if (errno || !tmp || tmp > UINT16_MAX) {
+ PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+ key, value);
+ return -1;
+ }
+
+ *num = tmp;
+
+ return 0;
+}
+
static int iavf_parse_devargs(struct rte_eth_dev *dev)
{
struct iavf_adapter *ad =
if (ret)
goto bail;
+ ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+ &parse_u16, &ad->devargs.quanta_size);
+ if (ret)
+ goto bail;
+
+ if (ad->devargs.quanta_size == 0)
+ ad->devargs.quanta_size = 1024;
+
+ /* quanta size must lie in [256, 4096] and be a multiple of 64 */
+ if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+ ad->devargs.quanta_size % 64) {
+ PMD_INIT_LOG(ERR, "invalid quanta size");
+ ret = -EINVAL;
+ goto bail;
+ }
+
bail:
rte_kvargs_free(kvlist);
return ret;
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vf->eth_dev = dev;
+
err = iavf_parse_devargs(dev);
if (err) {
PMD_INIT_LOG(ERR, "Failed to parse devargs");
}
}
+ if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT)
+ vf->lv_enabled = true;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
if (iavf_get_supported_rxdid(adapter) != 0) {
PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
return 0;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
hw->bus.func = pci_dev->addr.function;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
- adapter->eth_dev = eth_dev;
+ adapter->dev_data = eth_dev->data;
adapter->stopped = 1;
if (iavf_init_vf(eth_dev) != 0) {
}
/* set default ptype table */
- adapter->ptype_tbl = iavf_get_default_ptype_table();
+ iavf_set_default_ptype_table(eth_dev);
/* copy mac addr */
eth_dev->data->mac_addrs = rte_zmalloc(
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
+ rte_intr_callback_register(pci_dev->intr_handle,
iavf_dev_interrupt_handler,
(void *)eth_dev);
/* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_enable(pci_dev->intr_handle);
} else {
rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
iavf_dev_alarm_handler, eth_dev);
goto flow_init_err;
}
+ /* Check if the IPsec Crypto offload is supported and create
+ * security_ctx if it is.
+ */
+ if (iavf_ipsec_crypto_supported(adapter)) {
+ /* Initialize security_ctx only for the primary process */
+ ret = iavf_security_ctx_create(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+ return ret;
+ }
+
+ ret = iavf_security_init(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+ return ret;
+ }
+ }
+
iavf_default_rss_disable(adapter);
+
+ /* Start device watchdog */
+ iavf_dev_watchdog_enable(adapter);
+
return 0;
flow_init_err:
{
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
vf->vf_reset = false;
+ /* disable watchdog */
+ iavf_dev_watchdog_disable(adapter);
+
return ret;
}