1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
19 #include <rte_alarm.h>
20 #include <rte_atomic.h>
22 #include <rte_ether.h>
23 #include <ethdev_driver.h>
24 #include <ethdev_pci.h>
25 #include <rte_malloc.h>
26 #include <rte_memzone.h>
30 #include "iavf_rxtx.h"
31 #include "iavf_generic_flow.h"
32 #include "rte_pmd_iavf.h"
33 #include "iavf_ipsec_crypto.h"
/* Devargs key names accepted by this PMD. */
36 #define IAVF_PROTO_XTR_ARG "proto_xtr"
37 #define IAVF_QUANTA_SIZE_ARG "quanta_size"
/* Shared with the Rx path: mbuf dynamic flag and dynamic field offset for
 * Rx timestamps; offset is -1 until registered in iavf_init_rxq().
 */
39 uint64_t iavf_timestamp_dynflag;
40 int iavf_timestamp_dynfield_offset = -1;
/* Recognized devargs keys for this driver.
 * NOTE(review): the array body and NULL terminator are not visible in this
 * capture (original line numbering jumps here) — verify against full file.
 */
42 static const char * const iavf_valid_args[] = {
/* mbuf dynamic field carrying protocol-extraction metadata (one uint32_t
 * per packet), registered on demand by the Rx setup path.
 */
48 static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
49 .name = "intel_pmd_dynfield_proto_xtr_metadata",
50 .size = sizeof(uint32_t),
51 .align = __alignof__(uint32_t),
/* Associates a dynflag registration parameter with (per the table below)
 * the exported mask variable for one protocol-extraction type.
 * NOTE(review): remaining members are not visible in this capture.
 */
55 struct iavf_proto_xtr_ol {
56 const struct rte_mbuf_dynflag param;
/* Per-type dynflag name and exported mask pointer, indexed by
 * IAVF_PROTO_XTR_* identifier.
 */
61 static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
62 [IAVF_PROTO_XTR_VLAN] = {
63 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
64 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
65 [IAVF_PROTO_XTR_IPV4] = {
66 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
67 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
68 [IAVF_PROTO_XTR_IPV6] = {
69 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
70 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
71 [IAVF_PROTO_XTR_IPV6_FLOW] = {
72 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
73 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
74 [IAVF_PROTO_XTR_TCP] = {
75 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
76 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
77 [IAVF_PROTO_XTR_IP_OFFSET] = {
78 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
79 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
80 [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
82 .name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
84 &rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
/* Forward declarations for the eth_dev_ops callbacks defined later in this
 * file (referenced by the iavf_eth_dev_ops table below).
 */
87 static int iavf_dev_configure(struct rte_eth_dev *dev);
88 static int iavf_dev_start(struct rte_eth_dev *dev);
89 static int iavf_dev_stop(struct rte_eth_dev *dev);
90 static int iavf_dev_close(struct rte_eth_dev *dev);
91 static int iavf_dev_reset(struct rte_eth_dev *dev);
92 static int iavf_dev_info_get(struct rte_eth_dev *dev,
93 struct rte_eth_dev_info *dev_info);
94 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
95 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
96 struct rte_eth_stats *stats);
97 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
98 static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
99 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
100 struct rte_eth_xstat *xstats, unsigned int n);
101 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
102 struct rte_eth_xstat_name *xstats_names,
104 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
105 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
106 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
107 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
108 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
109 struct rte_ether_addr *addr,
112 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
113 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
114 uint16_t vlan_id, int on);
115 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
116 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
117 struct rte_eth_rss_reta_entry64 *reta_conf,
119 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
120 struct rte_eth_rss_reta_entry64 *reta_conf,
122 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
123 struct rte_eth_rss_conf *rss_conf);
124 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
125 struct rte_eth_rss_conf *rss_conf);
126 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
127 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
128 struct rte_ether_addr *mac_addr);
129 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
131 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
133 static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
134 const struct rte_flow_ops **ops);
135 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
136 struct rte_ether_addr *mc_addrs,
137 uint32_t mc_addrs_num);
138 static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
/* PCI vendor/device IDs this driver claims; zero vendor_id terminates. */
140 static const struct rte_pci_id pci_id_iavf_map[] = {
141 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
142 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
143 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
144 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
145 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
146 { .vendor_id = 0, /* sentinel */ },
/* Maps an xstats display name to a byte offset inside struct
 * iavf_eth_xstats (see _OFF_OF below).
 * NOTE(review): the offset member is not visible in this capture.
 */
149 struct rte_iavf_xstats_name_off {
150 char name[RTE_ETH_XSTATS_NAME_SIZE];
/* Offset helper and name/offset table backing xstats_get()/get_names().
 * All offsets are relative to struct iavf_eth_xstats; the
 * rx_unknown_protocol entry previously used
 * offsetof(struct iavf_eth_stats, ...), which is only correct when
 * eth_stats is the first member — now expressed with _OFF_OF for
 * consistency and robustness against layout changes.
 */
154 #define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
155 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
156 {"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
157 {"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
158 {"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
159 {"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
160 {"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
{"rx_unknown_protocol_packets",
	_OFF_OF(eth_stats.rx_unknown_protocol)},
163 {"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
164 {"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
165 {"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
166 {"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
167 {"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
168 {"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
170 {"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
171 {"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
172 {"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
173 {"inline_ipsec_crypto_ierrors_sad_lookup",
174 _OFF_OF(ips_stats.ierrors.sad_miss)},
175 {"inline_ipsec_crypto_ierrors_not_processed",
176 _OFF_OF(ips_stats.ierrors.not_processed)},
177 {"inline_ipsec_crypto_ierrors_icv_fail",
178 _OFF_OF(ips_stats.ierrors.icv_check)},
179 {"inline_ipsec_crypto_ierrors_length",
180 _OFF_OF(ips_stats.ierrors.ipsec_length)},
181 {"inline_ipsec_crypto_ierrors_misc",
182 _OFF_OF(ips_stats.ierrors.misc)},
/* Number of entries in the xstats table above. */
186 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
187 sizeof(rte_iavf_stats_strings[0]))
/* ethdev callback table registered for every iavf port. */
189 static const struct eth_dev_ops iavf_eth_dev_ops = {
190 .dev_configure = iavf_dev_configure,
191 .dev_start = iavf_dev_start,
192 .dev_stop = iavf_dev_stop,
193 .dev_close = iavf_dev_close,
194 .dev_reset = iavf_dev_reset,
195 .dev_infos_get = iavf_dev_info_get,
196 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
197 .link_update = iavf_dev_link_update,
198 .stats_get = iavf_dev_stats_get,
199 .stats_reset = iavf_dev_stats_reset,
200 .xstats_get = iavf_dev_xstats_get,
201 .xstats_get_names = iavf_dev_xstats_get_names,
202 .xstats_reset = iavf_dev_xstats_reset,
203 .promiscuous_enable = iavf_dev_promiscuous_enable,
204 .promiscuous_disable = iavf_dev_promiscuous_disable,
205 .allmulticast_enable = iavf_dev_allmulticast_enable,
206 .allmulticast_disable = iavf_dev_allmulticast_disable,
207 .mac_addr_add = iavf_dev_add_mac_addr,
208 .mac_addr_remove = iavf_dev_del_mac_addr,
209 .set_mc_addr_list = iavf_set_mc_addr_list,
210 .vlan_filter_set = iavf_dev_vlan_filter_set,
211 .vlan_offload_set = iavf_dev_vlan_offload_set,
212 .rx_queue_start = iavf_dev_rx_queue_start,
213 .rx_queue_stop = iavf_dev_rx_queue_stop,
214 .tx_queue_start = iavf_dev_tx_queue_start,
215 .tx_queue_stop = iavf_dev_tx_queue_stop,
216 .rx_queue_setup = iavf_dev_rx_queue_setup,
217 .rx_queue_release = iavf_dev_rx_queue_release,
218 .tx_queue_setup = iavf_dev_tx_queue_setup,
219 .tx_queue_release = iavf_dev_tx_queue_release,
220 .mac_addr_set = iavf_dev_set_default_mac_addr,
221 .reta_update = iavf_dev_rss_reta_update,
222 .reta_query = iavf_dev_rss_reta_query,
223 .rss_hash_update = iavf_dev_rss_hash_update,
224 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
225 .rxq_info_get = iavf_dev_rxq_info_get,
226 .txq_info_get = iavf_dev_txq_info_get,
227 .mtu_set = iavf_dev_mtu_set,
228 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
229 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
230 .flow_ops_get = iavf_dev_flow_ops_get,
231 .tx_done_cleanup = iavf_dev_tx_done_cleanup,
232 .get_monitor_addr = iavf_get_monitor_addr,
233 .tm_ops_get = iavf_tm_ops_get,
/* tm_ops_get callback: hand back the traffic-manager ops table via *arg.
 * NOTE(review): NULL-check of arg and return statement are not visible in
 * this capture.
 */
237 iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
243 *(const void **)arg = &iavf_tm_ops;
/* Poll VFGEN_RSTAT and report whether a VF reset is currently in
 * progress (VFR state field == VIRTCHNL_VFR_INPROGRESS); logs when one
 * is detected. Used by the device watchdog below.
 */
250 iavf_vfr_inprogress(struct iavf_hw *hw)
254 if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
255 IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
256 VIRTCHNL_VFR_INPROGRESS)
260 PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
/* Periodic EAL-alarm callback that monitors VF reset state:
 * - while a reset is in flight, polls for completion and clears vf_reset;
 * - otherwise, detects a new VFLR event, marks vf_reset and notifies the
 *   application via RTE_ETH_EVENT_INTR_RESET;
 * then re-arms itself unless the watchdog has been disabled.
 */
267 iavf_dev_watchdog(void *cb_arg)
269 struct iavf_adapter *adapter = cb_arg;
270 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
271 int vfr_inprogress = 0, rc = 0;
273 /* check if watchdog has been disabled since last call */
274 if (!adapter->vf.watchdog_enabled)
277 /* If in reset then poll vfr_inprogress register for completion */
278 if (adapter->vf.vf_reset) {
279 vfr_inprogress = iavf_vfr_inprogress(hw);
281 if (!vfr_inprogress) {
282 PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
283 adapter->vf.eth_dev->data->name);
284 adapter->vf.vf_reset = false;
286 /* If not in reset then poll vfr_inprogress register for VFLR event */
288 vfr_inprogress = iavf_vfr_inprogress(hw);
290 if (vfr_inprogress) {
292 "VF \"%s\" reset event detected by watchdog",
293 adapter->vf.eth_dev->data->name);
295 /* enter reset state with VFLR event */
296 adapter->vf.vf_reset = true;
298 rte_eth_dev_callback_process(adapter->vf.eth_dev,
299 RTE_ETH_EVENT_INTR_RESET, NULL);
303 /* re-alarm watchdog */
304 rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
305 &iavf_dev_watchdog, cb_arg);
308 PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
309 adapter->vf.eth_dev->data->name);
/* Arm the periodic device watchdog (no-op when compiled with a zero
 * IAVF_DEV_WATCHDOG_PERIOD, hence the __rte_unused on adapter).
 * Fix: log message typo "Failed to enabled" -> "Failed to enable".
 */
313 iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
315 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
316 PMD_DRV_LOG(INFO, "Enabling device watchdog");
317 adapter->vf.watchdog_enabled = true;
318 if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
319 &iavf_dev_watchdog, (void *)adapter))
320 PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
/* Disarm the watchdog: clearing watchdog_enabled makes the next
 * iavf_dev_watchdog() invocation return without re-arming itself.
 */
325 iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
327 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
328 PMD_DRV_LOG(INFO, "Disabling device watchdog")
329 adapter->vf.watchdog_enabled = false;
/* set_mc_addr_list callback: replace the whole multicast address set.
 * Flushes the previously-programmed list, adds the new one, and on
 * failure restores the previous list; the accepted list is cached in
 * vf->mc_addrs / vf->mc_addrs_num. Rejects lists larger than
 * IAVF_NUM_MACADDR_MAX.
 */
334 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
335 struct rte_ether_addr *mc_addrs,
336 uint32_t mc_addrs_num)
338 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
339 struct iavf_adapter *adapter =
340 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
343 if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
345 "can't add more than a limited number (%u) of addresses.",
346 (uint32_t)IAVF_NUM_MACADDR_MAX);
350 /* flush previous addresses */
351 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
357 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
360 /* if adding mac address list fails, should add the previous
363 ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
364 vf->mc_addrs_num, true);
368 vf->mc_addrs_num = mc_addrs_num;
370 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
/* Translate the application's rss_hf bitmask into the hardware HENA
 * (PCTYPE enable) bitmap and program it via virtchnl:
 * 1) expand RTE_ETH_RSS_IPV4/IPV6 into their specific sub-types;
 * 2) intersect the requested types with the PF-advertised caps;
 * 3) set HENA; warn (not fail) on any unsupported bits — RSS offload
 *    type config is best-effort for a VF;
 * 4) cache the accepted set in vf->rss_hf.
 */
377 iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
379 static const uint64_t map_hena_rss[] = {
381 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
382 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
383 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
384 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
385 [IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
386 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
387 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
388 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
389 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
390 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
391 [IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
392 RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
393 [IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
394 RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
395 [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
398 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
399 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
400 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
401 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
402 [IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
403 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
404 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
405 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
406 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
407 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
408 [IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
409 RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
410 [IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
411 RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
412 [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
415 [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
418 const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
419 RTE_ETH_RSS_NONFRAG_IPV4_TCP |
420 RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
421 RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
422 RTE_ETH_RSS_FRAG_IPV4;
424 const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
425 RTE_ETH_RSS_NONFRAG_IPV6_TCP |
426 RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
427 RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
428 RTE_ETH_RSS_FRAG_IPV6;
430 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
431 uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
435 ret = iavf_get_hena_caps(adapter, &caps);
438 * RSS offload type configuration is not a necessary feature
439 * for VF, so here just print a warning and return.
442 "fail to get RSS offload type caps, ret: %d", ret);
447 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
448 * generalizations of all other IPv4 and IPv6 RSS types.
450 if (rss_hf & RTE_ETH_RSS_IPV4)
453 if (rss_hf & RTE_ETH_RSS_IPV6)
456 RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
458 for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
459 uint64_t bit = BIT_ULL(i);
461 if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
462 valid_rss_hf |= map_hena_rss[i];
467 ret = iavf_set_hena(adapter, hena);
470 * RSS offload type configuration is not a necessary feature
471 * for VF, so here just print a warning and return.
474 "fail to set RSS offload types, ret: %d", ret);
/* Reflect the IPV4/IPV6 generalizations back into the accepted set. */
478 if (valid_rss_hf & ipv4_rss)
479 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
481 if (valid_rss_hf & ipv6_rss)
482 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
484 if (rss_hf & ~valid_rss_hf)
485 PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
486 rss_hf & ~valid_rss_hf);
488 vf->rss_hf = valid_rss_hf;
/* One-shot RSS initialization at configure time:
 * - bail out if the PF does not advertise RSS offload (OFFLOAD_RSS_PF);
 * - pick the RSS key: random when the app supplied none, otherwise copy
 *   the app key (clamped to the device's rss_key_size);
 * - build the redirection table and push key/LUT via virtchnl;
 * - program the hash-field config, through the advanced-RSS path when
 *   ADV_RSS_PF is supported, else via iavf_config_rss_hf().
 */
492 iavf_init_rss(struct iavf_adapter *adapter)
494 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
495 struct rte_eth_rss_conf *rss_conf;
499 rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
500 nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
501 vf->max_rss_qregion);
503 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
504 PMD_DRV_LOG(DEBUG, "RSS is not supported");
508 /* configure RSS key */
509 if (!rss_conf->rss_key) {
510 /* Calculate the default hash key */
511 for (i = 0; i < vf->vf_res->rss_key_size; i++)
512 vf->rss_key[i] = (uint8_t)rte_rand();
514 rte_memcpy(vf->rss_key, rss_conf->rss_key,
515 RTE_MIN(rss_conf->rss_key_len,
516 vf->vf_res->rss_key_size));
518 /* init RSS LUT table */
519 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
524 /* send virtchnl ops to configure RSS */
525 ret = iavf_configure_rss_lut(adapter);
528 ret = iavf_configure_rss_key(adapter);
532 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
533 /* Set RSS hash configuration based on rss_conf->rss_hf. */
534 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
536 PMD_DRV_LOG(ERR, "fail to set default RSS");
540 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
/* Request `num` queue pairs from the PF, then reset the VF so the new
 * allocation takes effect. Logs and propagates failures from either the
 * request or the reset.
 */
547 iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
549 struct iavf_adapter *ad =
550 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
551 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
554 ret = iavf_request_queues(dev, num);
556 PMD_DRV_LOG(ERR, "request queues from PF failed");
559 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
560 vf->vsi_res->num_queue_pairs, num);
562 ret = iavf_dev_reset(dev);
564 PMD_DRV_LOG(ERR, "vf reset failed");
/* Enable/disable Tx VLAN insertion according to the configured txmode
 * offloads; only applicable when the PF supports VLAN V2 virtchnl,
 * otherwise a no-op.
 */
572 iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
574 struct iavf_adapter *adapter =
575 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
576 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
579 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
582 enable = !!(dev->data->dev_conf.txmode.offloads &
583 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
584 iavf_config_vlan_insert_v2(adapter, enable);
/* Apply all VLAN-related configuration at configure time: offloads
 * (strip/QinQ/filter/extend) followed by Tx VLAN insertion; logs and
 * propagates the first failure.
 */
590 iavf_dev_init_vlan(struct rte_eth_dev *dev)
594 err = iavf_dev_vlan_offload_set(dev,
595 RTE_ETH_VLAN_STRIP_MASK |
596 RTE_ETH_QINQ_STRIP_MASK |
597 RTE_ETH_VLAN_FILTER_MASK |
598 RTE_ETH_VLAN_EXTEND_MASK);
600 PMD_DRV_LOG(ERR, "Failed to update vlan offload");
604 err = iavf_dev_vlan_insert_set(dev);
606 PMD_DRV_LOG(ERR, "Failed to update vlan insertion");
/* dev_configure callback:
 * - optimistically allow bulk-alloc and vector Rx/Tx (queue setup may
 *   later veto them);
 * - force RSS_HASH offload on when RSS multi-queue mode is requested;
 * - large-VF handling: when more than IAVF_MAX_NUM_QUEUES_DFLT pairs are
 *   needed, verify the LARGE_NUM_QPAIRS capability and the
 *   IAVF_MAX_NUM_QUEUES_LV ceiling, request queues + VF reset, and query
 *   the max RSS queue region; otherwise shrink back / request just
 *   enough pairs and use the default RSS queue region;
 * - finally apply VLAN config and initialize RSS.
 */
612 iavf_dev_configure(struct rte_eth_dev *dev)
614 struct iavf_adapter *ad =
615 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
616 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
617 uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
618 dev->data->nb_tx_queues);
621 ad->rx_bulk_alloc_allowed = true;
622 /* Initialize to TRUE. If any of Rx queues doesn't meet the
623 * vector Rx/Tx preconditions, it will be reset.
625 ad->rx_vec_allowed = true;
626 ad->tx_vec_allowed = true;
628 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
629 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
631 /* Large VF setting */
632 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
633 if (!(vf->vf_res->vf_cap_flags &
634 VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
635 PMD_DRV_LOG(ERR, "large VF is not supported");
639 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
640 PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
641 IAVF_MAX_NUM_QUEUES_LV);
645 ret = iavf_queues_req_reset(dev, num_queue_pairs);
649 ret = iavf_get_max_rss_queue_region(ad);
651 PMD_INIT_LOG(ERR, "get max rss queue region failed");
655 vf->lv_enabled = true;
657 /* Check if large VF is already enabled. If so, disable and
658 * release redundant queue resource.
659 * Or check if enough queue pairs. If not, request them from PF.
661 if (vf->lv_enabled ||
662 num_queue_pairs > vf->vsi_res->num_queue_pairs) {
663 ret = iavf_queues_req_reset(dev, num_queue_pairs);
667 vf->lv_enabled = false;
669 /* if large VF is not required, use default rss queue region */
670 vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
673 ret = iavf_dev_init_vlan(dev);
675 PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
677 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
678 if (iavf_init_rss(ad) != 0) {
679 PMD_DRV_LOG(ERR, "configure rss failed");
/* Per-queue Rx init:
 * - derive max_pkt_len from buffer size * max chain length, bounded by
 *   the MTU-derived frame size, and validate it against
 *   [RTE_ETHER_MIN_LEN, IAVF_FRAME_SIZE_MAX];
 * - register the mbuf timestamp dynfield/dynflag when the TIMESTAMP
 *   offload is enabled on this queue;
 * - enable scattered Rx when SCATTER is requested or a frame exceeds one
 *   mbuf data room;
 * - prime the Rx tail register with the last descriptor index.
 */
687 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
689 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
690 struct rte_eth_dev_data *dev_data = dev->data;
691 uint16_t buf_size, max_pkt_len;
692 uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
693 enum iavf_status err;
695 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
697 /* Calculate the maximum packet length allowed */
698 max_pkt_len = RTE_MIN((uint32_t)
699 rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
702 /* Check if maximum packet length is set correctly. */
703 if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
704 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
705 PMD_DRV_LOG(ERR, "maximum packet length must be "
706 "larger than %u and smaller than %u",
707 (uint32_t)IAVF_ETH_MAX_LEN,
708 (uint32_t)IAVF_FRAME_SIZE_MAX);
712 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
713 /* Register mbuf field and flag for Rx timestamp */
714 err = rte_mbuf_dyn_rx_timestamp_register(
715 &iavf_timestamp_dynfield_offset,
716 &iavf_timestamp_dynflag);
719 "Cannot register mbuf field/flag for timestamp");
724 rxq->max_pkt_len = max_pkt_len;
725 if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
726 rxq->max_pkt_len > buf_size) {
727 dev_data->scattered_rx = 1;
729 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
730 IAVF_WRITE_FLUSH(hw);
/* Initialize every configured Rx queue (skipping unset ones), then pick
 * the Rx/Tx burst functions (vector / scattered / single-segment) based
 * on the resulting device state.
 */
736 iavf_init_queues(struct rte_eth_dev *dev)
738 struct iavf_rx_queue **rxq =
739 (struct iavf_rx_queue **)dev->data->rx_queues;
740 int i, ret = IAVF_SUCCESS;
742 for (i = 0; i < dev->data->nb_rx_queues; i++) {
743 if (!rxq[i] || !rxq[i]->q_set)
745 ret = iavf_init_rxq(dev, rxq[i]);
746 if (ret != IAVF_SUCCESS)
749 /* set rx/tx function to vector/scatter/single-segment
750 * according to parameters
752 iavf_set_rx_function(dev);
753 iavf_set_tx_function(dev);
/* Map Rx queues to MSI-X vectors and program the mapping on the PF.
 * Three cases:
 * 1) Rx interrupts disabled: map everything to one vector — with
 *    WB_ON_ITR support the writeback vector (ITR set to ~2us), else the
 *    misc vector with the default ITR;
 * 2) Rx interrupts enabled but no dedicated per-queue vectors allowed:
 *    all queues share the misc vector;
 * 3) multiple vectors available: distribute queues round-robin over
 *    vectors starting at IAVF_RX_VEC_START.
 * The map is sent in one virtchnl message, or chunked in
 * IAVF_IRQ_MAP_NUM_PER_BUF pieces for large VFs. On failure the qv map
 * and the intr vector list are released.
 */
758 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
759 struct rte_intr_handle *intr_handle)
761 struct iavf_adapter *adapter =
762 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
763 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
764 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
765 struct iavf_qv_map *qv_map;
766 uint16_t interval, i;
769 if (rte_intr_cap_multiple(intr_handle) &&
770 dev->data->dev_conf.intr_conf.rxq) {
771 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
775 if (rte_intr_dp_is_en(intr_handle)) {
776 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
777 dev->data->nb_rx_queues)) {
778 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
779 dev->data->nb_rx_queues);
785 qv_map = rte_zmalloc("qv_map",
786 dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
788 PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
789 dev->data->nb_rx_queues);
790 goto qv_map_alloc_err;
793 if (!dev->data->dev_conf.intr_conf.rxq ||
794 !rte_intr_dp_is_en(intr_handle)) {
795 /* Rx interrupt disabled, Map interrupt only for writeback */
797 if (vf->vf_res->vf_cap_flags &
798 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
799 /* If WB_ON_ITR supports, enable it */
800 vf->msix_base = IAVF_RX_VEC_START;
801 /* Set the ITR for index zero, to 2us to make sure that
802 * we leave time for aggregation to occur, but don't
803 * increase latency dramatically.
806 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
807 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
808 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
809 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
810 /* debug - check for success! the return value
811 * should be 2, offset is 0x2800
813 /* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
815 /* If no WB_ON_ITR offload flags, need to set
816 * interrupt for descriptor write back.
818 vf->msix_base = IAVF_MISC_VEC_ID;
820 /* set ITR to default */
821 interval = iavf_calc_itr_interval(
822 IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
823 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
824 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
825 (IAVF_ITR_INDEX_DEFAULT <<
826 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
828 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
830 IAVF_WRITE_FLUSH(hw);
831 /* map all queues to the same interrupt */
832 for (i = 0; i < dev->data->nb_rx_queues; i++) {
833 qv_map[i].queue_id = i;
834 qv_map[i].vector_id = vf->msix_base;
838 if (!rte_intr_allow_others(intr_handle)) {
840 vf->msix_base = IAVF_MISC_VEC_ID;
841 for (i = 0; i < dev->data->nb_rx_queues; i++) {
842 qv_map[i].queue_id = i;
843 qv_map[i].vector_id = vf->msix_base;
844 rte_intr_vec_list_index_set(intr_handle,
845 i, IAVF_MISC_VEC_ID);
849 "vector %u are mapping to all Rx queues",
852 /* If Rx interrupt is required, and we can use
853 * multi interrupts, then the vec is from 1
856 RTE_MIN(rte_intr_nb_efd_get(intr_handle),
857 (uint16_t)(vf->vf_res->max_vectors - 1));
858 vf->msix_base = IAVF_RX_VEC_START;
859 vec = IAVF_RX_VEC_START;
860 for (i = 0; i < dev->data->nb_rx_queues; i++) {
861 qv_map[i].queue_id = i;
862 qv_map[i].vector_id = vec;
863 rte_intr_vec_list_index_set(intr_handle,
865 if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
866 vec = IAVF_RX_VEC_START;
870 "%u vectors are mapping to %u Rx queues",
871 vf->nb_msix, dev->data->nb_rx_queues);
875 if (!vf->lv_enabled) {
876 if (iavf_config_irq_map(adapter)) {
877 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
878 goto config_irq_map_err;
881 uint16_t num_qv_maps = dev->data->nb_rx_queues;
884 while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
885 if (iavf_config_irq_map_lv(adapter,
886 IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
887 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
888 goto config_irq_map_err;
890 num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
891 index += IAVF_IRQ_MAP_NUM_PER_BUF;
894 if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
895 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
896 goto config_irq_map_err;
902 rte_free(vf->qv_map);
906 rte_intr_vec_list_free(intr_handle);
/* Start all Tx queues, then all Rx queues, skipping those marked
 * deferred-start; logs the failing queue index on error.
 */
912 iavf_start_queues(struct rte_eth_dev *dev)
914 struct iavf_rx_queue *rxq;
915 struct iavf_tx_queue *txq;
918 for (i = 0; i < dev->data->nb_tx_queues; i++) {
919 txq = dev->data->tx_queues[i];
920 if (txq->tx_deferred_start)
922 if (iavf_dev_tx_queue_start(dev, i) != 0) {
923 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
928 for (i = 0; i < dev->data->nb_rx_queues; i++) {
929 rxq = dev->data->rx_queues[i];
930 if (rxq->rx_deferred_start)
932 if (iavf_dev_rx_queue_start(dev, i) != 0) {
933 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
/* dev_start callback: bring the port up.
 * Sequence: query QoS / PTP caps when advertised; init queues; set
 * quanta size (warn-only on failure); send configure-queues virtchnl
 * messages, chunked by IAVF_CFG_Q_NUM_PER_BUF to respect the 4K adminq
 * buffer limit; program Rx IRQ maps; re-enable interrupts (efd
 * assignment may have changed); program unicast and multicast MAC
 * lists; finally start the queues. On late failure the MAC list is
 * rolled back (error-path labels not visible in this capture).
 */
942 iavf_dev_start(struct rte_eth_dev *dev)
944 struct iavf_adapter *adapter =
945 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
946 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
947 struct rte_intr_handle *intr_handle = dev->intr_handle;
948 uint16_t num_queue_pairs;
951 PMD_INIT_FUNC_TRACE();
953 adapter->stopped = 0;
955 vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
956 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
957 dev->data->nb_tx_queues);
958 num_queue_pairs = vf->num_queue_pairs;
960 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
961 if (iavf_get_qos_cap(adapter)) {
962 PMD_INIT_LOG(ERR, "Failed to get qos capability");
966 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
967 if (iavf_get_ptp_cap(adapter)) {
968 PMD_INIT_LOG(ERR, "Failed to get ptp capability");
973 if (iavf_init_queues(dev) != 0) {
974 PMD_DRV_LOG(ERR, "failed to do Queue init");
978 if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
979 PMD_DRV_LOG(WARNING, "configure quanta size failed");
981 /* If needed, send configure queues msg multiple times to make the
982 * adminq buffer length smaller than the 4K limitation.
984 while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
985 if (iavf_configure_queues(adapter,
986 IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
987 PMD_DRV_LOG(ERR, "configure queues failed");
990 num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
991 index += IAVF_CFG_Q_NUM_PER_BUF;
994 if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
995 PMD_DRV_LOG(ERR, "configure queues failed");
999 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
1000 PMD_DRV_LOG(ERR, "configure irq failed");
1003 /* re-enable intr again, because efd assign may change */
1004 if (dev->data->dev_conf.intr_conf.rxq != 0) {
1005 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1006 rte_intr_disable(intr_handle);
1007 rte_intr_enable(intr_handle);
1010 /* Set all mac addrs */
1011 iavf_add_del_all_mac_addr(adapter, true);
1013 /* Set all multicast addresses */
1014 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
1017 if (iavf_start_queues(dev) != 0) {
1018 PMD_DRV_LOG(ERR, "enable queues failed");
1025 iavf_add_del_all_mac_addr(adapter, false);
/* dev_stop callback: quiesce the port (idempotent via adapter->stopped).
 * Disables the Rx interrupt when it was in use without WB_ON_ITR, stops
 * all queues, releases efd and the intr vector list, removes unicast and
 * multicast MAC filters, destroys the inline-IPsec security context, and
 * clears dev_started.
 */
1031 iavf_dev_stop(struct rte_eth_dev *dev)
1033 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1034 struct iavf_adapter *adapter =
1035 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1036 struct rte_intr_handle *intr_handle = dev->intr_handle;
1038 PMD_INIT_FUNC_TRACE();
1040 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
1041 dev->data->dev_conf.intr_conf.rxq != 0)
1042 rte_intr_disable(intr_handle);
1044 if (adapter->stopped == 1)
1047 iavf_stop_queues(dev);
1049 /* Disable the interrupt for Rx */
1050 rte_intr_efd_disable(intr_handle);
1051 /* Rx interrupt vector mapping free */
1052 rte_intr_vec_list_free(intr_handle);
1054 /* remove all mac addrs */
1055 iavf_add_del_all_mac_addr(adapter, false);
1057 /* remove all multicast addresses */
1058 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
1061 /* free iAVF security device context all related resources */
1062 iavf_security_ctx_destroy(adapter);
1064 adapter->stopped = 1;
1065 dev->data->dev_started = 0;
/* dev_infos_get callback: report static device capabilities — queue and
 * MTU limits, RSS key/LUT sizes, base Rx/Tx offload sets, plus
 * conditional offloads (KEEP_CRC with OFFLOAD_CRC, TIMESTAMP with
 * CAP_PTP, SECURITY when inline IPsec is supported) and default
 * queue-conf / descriptor limits.
 */
1071 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1073 struct iavf_adapter *adapter =
1074 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1075 struct iavf_info *vf = &adapter->vf;
1077 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
1078 dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
1079 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
1080 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
1081 dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
1082 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1083 dev_info->hash_key_size = vf->vf_res->rss_key_size;
1084 dev_info->reta_size = vf->vf_res->rss_lut_size;
1085 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
1086 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
1087 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1088 dev_info->rx_offload_capa =
1089 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
1090 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
1091 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1092 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1093 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
1094 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1095 RTE_ETH_RX_OFFLOAD_SCATTER |
1096 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1097 RTE_ETH_RX_OFFLOAD_RSS_HASH;
1099 dev_info->tx_offload_capa =
1100 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1101 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
1102 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1103 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1104 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1105 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
1106 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1107 RTE_ETH_TX_OFFLOAD_TCP_TSO |
1108 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1109 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
1110 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
1111 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
1112 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
1113 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1115 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
1116 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
1118 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
1119 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
1121 if (iavf_ipsec_crypto_supported(adapter)) {
1122 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1123 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1126 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1127 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
1132 dev_info->default_txconf = (struct rte_eth_txconf) {
1133 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
1134 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
1138 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1139 .nb_max = IAVF_MAX_RING_DESC,
1140 .nb_min = IAVF_MIN_RING_DESC,
1141 .nb_align = IAVF_ALIGN_RING_DESC,
1144 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1145 .nb_max = IAVF_MAX_RING_DESC,
1146 .nb_min = IAVF_MIN_RING_DESC,
1147 .nb_align = IAVF_ALIGN_RING_DESC,
/* dev_supported_ptypes_get callback: return the static list of packet
 * types this PMD can report.
 * NOTE(review): most list entries and the terminator are not visible in
 * this capture.
 */
1153 static const uint32_t *
1154 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1156 static const uint32_t ptypes[] = {
1158 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1161 RTE_PTYPE_L4_NONFRAG,
1171 iavf_dev_link_update(struct rte_eth_dev *dev,
1172 __rte_unused int wait_to_complete)
1174 struct rte_eth_link new_link;
1175 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1177 memset(&new_link, 0, sizeof(new_link));
1179 /* Only read status info stored in VF, and the info is updated
1180 * when receive LINK_CHANGE evnet from PF by Virtchnnl.
1182 switch (vf->link_speed) {
1184 new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1187 new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1190 new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1193 new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1196 new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1199 new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1202 new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1205 new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1208 new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1211 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1215 new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1216 new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
1218 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1219 RTE_ETH_LINK_SPEED_FIXED);
1221 return rte_eth_linkstatus_set(dev, &new_link);
1225 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1227 struct iavf_adapter *adapter =
1228 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1229 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1231 return iavf_config_promisc(adapter,
1232 true, vf->promisc_multicast_enabled);
1236 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1238 struct iavf_adapter *adapter =
1239 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1240 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1242 return iavf_config_promisc(adapter,
1243 false, vf->promisc_multicast_enabled);
1247 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1249 struct iavf_adapter *adapter =
1250 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1251 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1253 return iavf_config_promisc(adapter,
1254 vf->promisc_unicast_enabled, true);
1258 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1260 struct iavf_adapter *adapter =
1261 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1262 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1264 return iavf_config_promisc(adapter,
1265 vf->promisc_unicast_enabled, false);
1269 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
1270 __rte_unused uint32_t index,
1271 __rte_unused uint32_t pool)
1273 struct iavf_adapter *adapter =
1274 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1275 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1278 if (rte_is_zero_ether_addr(addr)) {
1279 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1283 err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
1285 PMD_DRV_LOG(ERR, "fail to add MAC address");
1295 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1297 struct iavf_adapter *adapter =
1298 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1299 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1300 struct rte_ether_addr *addr;
1303 addr = &dev->data->mac_addrs[index];
1305 err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
1307 PMD_DRV_LOG(ERR, "fail to delete MAC address");
1313 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1315 struct iavf_adapter *adapter =
1316 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1317 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1320 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1321 err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
1327 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1330 err = iavf_add_del_vlan(adapter, vlan_id, on);
1337 iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1339 struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1340 struct iavf_adapter *adapter =
1341 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1345 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1346 if (vfc->ids[i] == 0)
1350 for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1352 iavf_add_del_vlan_v2(adapter,
1353 64 * i + j, enable);
1359 iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1361 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1362 struct iavf_adapter *adapter =
1363 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1367 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1368 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
1370 iavf_iterate_vlan_filters_v2(dev, enable);
1373 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1374 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1376 err = iavf_config_vlan_strip_v2(adapter, enable);
1377 /* If not support, the stripping is already disabled by PF */
1378 if (err == -ENOTSUP && !enable)
1388 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1390 struct iavf_adapter *adapter =
1391 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1392 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1393 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1396 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1397 return iavf_dev_vlan_offload_set_v2(dev, mask);
1399 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1402 /* Vlan stripping setting */
1403 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1404 /* Enable or disable VLAN stripping */
1405 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1406 err = iavf_enable_vlan_strip(adapter);
1408 err = iavf_disable_vlan_strip(adapter);
1417 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
1418 struct rte_eth_rss_reta_entry64 *reta_conf,
1421 struct iavf_adapter *adapter =
1422 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1423 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1425 uint16_t i, idx, shift;
1428 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1431 if (reta_size != vf->vf_res->rss_lut_size) {
1432 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1433 "(%d) doesn't match the number of hardware can "
1434 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1438 lut = rte_zmalloc("rss_lut", reta_size, 0);
1440 PMD_DRV_LOG(ERR, "No memory can be allocated");
1443 /* store the old lut table temporarily */
1444 rte_memcpy(lut, vf->rss_lut, reta_size);
1446 for (i = 0; i < reta_size; i++) {
1447 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1448 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1449 if (reta_conf[idx].mask & (1ULL << shift))
1450 lut[i] = reta_conf[idx].reta[shift];
1453 rte_memcpy(vf->rss_lut, lut, reta_size);
1454 /* send virtchnl ops to configure RSS */
1455 ret = iavf_configure_rss_lut(adapter);
1456 if (ret) /* revert back */
1457 rte_memcpy(vf->rss_lut, lut, reta_size);
1464 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
1465 struct rte_eth_rss_reta_entry64 *reta_conf,
1468 struct iavf_adapter *adapter =
1469 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1470 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1471 uint16_t i, idx, shift;
1473 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1476 if (reta_size != vf->vf_res->rss_lut_size) {
1477 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1478 "(%d) doesn't match the number of hardware can "
1479 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1483 for (i = 0; i < reta_size; i++) {
1484 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1485 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1486 if (reta_conf[idx].mask & (1ULL << shift))
1487 reta_conf[idx].reta[shift] = vf->rss_lut[i];
1494 iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
1496 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1498 /* HENA setting, it is enabled by default, no change */
1499 if (!key || key_len == 0) {
1500 PMD_DRV_LOG(DEBUG, "No key to be configured");
1502 } else if (key_len != vf->vf_res->rss_key_size) {
1503 PMD_DRV_LOG(ERR, "The size of hash key configured "
1504 "(%d) doesn't match the size of hardware can "
1505 "support (%d)", key_len,
1506 vf->vf_res->rss_key_size);
1510 rte_memcpy(vf->rss_key, key, key_len);
1512 return iavf_configure_rss_key(adapter);
1516 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
1517 struct rte_eth_rss_conf *rss_conf)
1519 struct iavf_adapter *adapter =
1520 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1521 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1524 adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
1526 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1530 ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
1531 rss_conf->rss_key_len);
1535 if (rss_conf->rss_hf == 0) {
1537 ret = iavf_set_hena(adapter, 0);
1539 /* It is a workaround, temporarily allow error to be returned
1540 * due to possible lack of PF handling for hena = 0.
1543 PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
1547 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
1548 /* Clear existing RSS. */
1549 ret = iavf_set_hena(adapter, 0);
1551 /* It is a workaround, temporarily allow error to be returned
1552 * due to possible lack of PF handling for hena = 0.
1555 PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
1558 /* Set new RSS configuration. */
1559 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
1561 PMD_DRV_LOG(ERR, "fail to set new RSS");
1565 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
1572 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1573 struct rte_eth_rss_conf *rss_conf)
1575 struct iavf_adapter *adapter =
1576 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1577 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1579 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1582 rss_conf->rss_hf = vf->rss_hf;
1584 if (!rss_conf->rss_key)
1587 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
1588 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
1594 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1596 /* mtu setting is forbidden if port is start */
1597 if (dev->data->dev_started) {
1598 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
1606 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1607 struct rte_ether_addr *mac_addr)
1609 struct iavf_adapter *adapter =
1610 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1611 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1612 struct rte_ether_addr *old_addr;
1615 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1617 if (rte_is_same_ether_addr(old_addr, mac_addr))
1620 ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
1622 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1623 RTE_ETHER_ADDR_PRT_FMT,
1624 RTE_ETHER_ADDR_BYTES(old_addr));
1626 ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
1628 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1629 RTE_ETHER_ADDR_PRT_FMT,
1630 RTE_ETHER_ADDR_BYTES(mac_addr));
1635 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
1640 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1642 if (*stat >= *offset)
1643 *stat = *stat - *offset;
1645 *stat = (uint64_t)((*stat +
1646 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1648 *stat &= IAVF_48_BIT_MASK;
1652 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1654 if (*stat >= *offset)
1655 *stat = (uint64_t)(*stat - *offset);
1657 *stat = (uint64_t)((*stat +
1658 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
1662 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1664 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
1666 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1667 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1668 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1669 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1670 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1671 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1672 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1673 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1674 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1675 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1676 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1680 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1682 struct iavf_adapter *adapter =
1683 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1684 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1685 struct iavf_vsi *vsi = &vf->vsi;
1686 struct virtchnl_eth_stats *pstats = NULL;
1689 ret = iavf_query_stats(adapter, &pstats);
1691 uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
1692 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
1694 iavf_update_stats(vsi, pstats);
1695 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1696 pstats->rx_broadcast - pstats->rx_discards;
1697 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1699 stats->imissed = pstats->rx_discards;
1700 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1701 stats->ibytes = pstats->rx_bytes;
1702 stats->ibytes -= stats->ipackets * crc_stats_len;
1703 stats->obytes = pstats->tx_bytes;
1705 PMD_DRV_LOG(ERR, "Get statistics failed");
1711 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1714 struct iavf_adapter *adapter =
1715 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1716 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1717 struct iavf_vsi *vsi = &vf->vsi;
1718 struct virtchnl_eth_stats *pstats = NULL;
1720 /* read stat values to clear hardware registers */
1721 ret = iavf_query_stats(adapter, &pstats);
1725 /* set stats offset base on current values */
1726 vsi->eth_stats_offset.eth_stats = *pstats;
1732 iavf_dev_xstats_reset(struct rte_eth_dev *dev)
1734 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1735 iavf_dev_stats_reset(dev);
1736 memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
1737 sizeof(struct iavf_ipsec_crypto_stats));
1741 static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1742 struct rte_eth_xstat_name *xstats_names,
1743 __rte_unused unsigned int limit)
1747 if (xstats_names != NULL)
1748 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1749 snprintf(xstats_names[i].name,
1750 sizeof(xstats_names[i].name),
1751 "%s", rte_iavf_stats_strings[i].name);
1753 return IAVF_NB_XSTATS;
1757 iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
1758 struct iavf_ipsec_crypto_stats *ips)
1761 for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
1762 struct iavf_rx_queue *rxq;
1763 struct iavf_ipsec_crypto_stats *stats;
1764 rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
1765 stats = &rxq->stats.ipsec_crypto;
1766 ips->icount += stats->icount;
1767 ips->ibytes += stats->ibytes;
1768 ips->ierrors.count += stats->ierrors.count;
1769 ips->ierrors.sad_miss += stats->ierrors.sad_miss;
1770 ips->ierrors.not_processed += stats->ierrors.not_processed;
1771 ips->ierrors.icv_check += stats->ierrors.icv_check;
1772 ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
1773 ips->ierrors.misc += stats->ierrors.misc;
1777 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
1778 struct rte_eth_xstat *xstats, unsigned int n)
1782 struct iavf_adapter *adapter =
1783 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1784 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1785 struct iavf_vsi *vsi = &vf->vsi;
1786 struct virtchnl_eth_stats *pstats = NULL;
1787 struct iavf_eth_xstats iavf_xtats = {{0}};
1789 if (n < IAVF_NB_XSTATS)
1790 return IAVF_NB_XSTATS;
1792 ret = iavf_query_stats(adapter, &pstats);
1799 iavf_update_stats(vsi, pstats);
1800 iavf_xtats.eth_stats = *pstats;
1802 if (iavf_ipsec_crypto_supported(adapter))
1803 iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
1805 /* loop over xstats array and values from pstats */
1806 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1808 xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
1809 rte_iavf_stats_strings[i].offset);
1812 return IAVF_NB_XSTATS;
1817 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1819 struct iavf_adapter *adapter =
1820 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1821 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1822 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1823 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1826 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1828 if (msix_intr == IAVF_MISC_VEC_ID) {
1829 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1830 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1831 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1832 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1833 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1836 IAVF_VFINT_DYN_CTLN1
1837 (msix_intr - IAVF_RX_VEC_START),
1838 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1839 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1840 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1843 IAVF_WRITE_FLUSH(hw);
1845 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1846 rte_intr_ack(pci_dev->intr_handle);
1852 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1854 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1855 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1858 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1860 if (msix_intr == IAVF_MISC_VEC_ID) {
1861 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1866 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1869 IAVF_WRITE_FLUSH(hw);
1874 iavf_check_vf_reset_done(struct iavf_hw *hw)
1878 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1879 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1880 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1881 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1882 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1883 reset == VIRTCHNL_VFR_COMPLETED)
1888 if (i >= IAVF_RESET_WAIT_CNT)
1895 iavf_lookup_proto_xtr_type(const char *flex_name)
1899 enum iavf_proto_xtr_type type;
1900 } xtr_type_map[] = {
1901 { "vlan", IAVF_PROTO_XTR_VLAN },
1902 { "ipv4", IAVF_PROTO_XTR_IPV4 },
1903 { "ipv6", IAVF_PROTO_XTR_IPV6 },
1904 { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
1905 { "tcp", IAVF_PROTO_XTR_TCP },
1906 { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
1907 { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
1911 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
1912 if (strcmp(flex_name, xtr_type_map[i].name) == 0)
1913 return xtr_type_map[i].type;
1916 PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
1917 "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
1923 * Parse elem, the elem could be single number/range or '(' ')' group
1924 * 1) A single number elem, it's just a simple digit. e.g. 9
1925 * 2) A single range elem, two digits with a '-' between. e.g. 2-6
1926 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6)
1927 * Within group elem, '-' used for a range separator;
1928 * ',' used for a single number.
1931 iavf_parse_queue_set(const char *input, int xtr_type,
1932 struct iavf_devargs *devargs)
1934 const char *str = input;
1939 while (isblank(*str))
1942 if (!isdigit(*str) && *str != '(')
1945 /* process single number or single range of number */
1948 idx = strtoul(str, &end, 10);
1949 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1952 while (isblank(*end))
1958 /* process single <number>-<number> */
1961 while (isblank(*end))
1967 idx = strtoul(end, &end, 10);
1968 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1972 while (isblank(*end))
1979 for (idx = RTE_MIN(min, max);
1980 idx <= RTE_MAX(min, max); idx++)
1981 devargs->proto_xtr[idx] = xtr_type;
1986 /* process set within bracket */
1988 while (isblank(*str))
1993 min = IAVF_MAX_QUEUE_NUM;
1995 /* go ahead to the first digit */
1996 while (isblank(*str))
2001 /* get the digit value */
2003 idx = strtoul(str, &end, 10);
2004 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
2007 /* go ahead to separator '-',',' and ')' */
2008 while (isblank(*end))
2011 if (min == IAVF_MAX_QUEUE_NUM)
2013 else /* avoid continuous '-' */
2015 } else if (*end == ',' || *end == ')') {
2017 if (min == IAVF_MAX_QUEUE_NUM)
2020 for (idx = RTE_MIN(min, max);
2021 idx <= RTE_MAX(min, max); idx++)
2022 devargs->proto_xtr[idx] = xtr_type;
2024 min = IAVF_MAX_QUEUE_NUM;
2030 } while (*end != ')' && *end != '\0');
2036 iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
2038 const char *queue_start;
2043 while (isblank(*queues))
2046 if (*queues != '[') {
2047 xtr_type = iavf_lookup_proto_xtr_type(queues);
2051 devargs->proto_xtr_dflt = xtr_type;
2058 while (isblank(*queues))
2060 if (*queues == '\0')
2063 queue_start = queues;
2065 /* go across a complete bracket */
2066 if (*queue_start == '(') {
2067 queues += strcspn(queues, ")");
2072 /* scan the separator ':' */
2073 queues += strcspn(queues, ":");
2074 if (*queues++ != ':')
2076 while (isblank(*queues))
2079 for (idx = 0; ; idx++) {
2080 if (isblank(queues[idx]) ||
2081 queues[idx] == ',' ||
2082 queues[idx] == ']' ||
2083 queues[idx] == '\0')
2086 if (idx > sizeof(flex_name) - 2)
2089 flex_name[idx] = queues[idx];
2091 flex_name[idx] = '\0';
2092 xtr_type = iavf_lookup_proto_xtr_type(flex_name);
2098 while (isblank(*queues) || *queues == ',' || *queues == ']')
2101 if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
2103 } while (*queues != '\0');
2109 iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
2112 struct iavf_devargs *devargs = extra_args;
2114 if (!value || !extra_args)
2117 if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
2118 PMD_DRV_LOG(ERR, "the proto_xtr's parameter is wrong : '%s'",
2127 parse_u16(__rte_unused const char *key, const char *value, void *args)
2129 u16 *num = (u16 *)args;
2133 tmp = strtoull(value, NULL, 10);
2134 if (errno || !tmp) {
2135 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
2145 static int iavf_parse_devargs(struct rte_eth_dev *dev)
2147 struct iavf_adapter *ad =
2148 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2149 struct rte_devargs *devargs = dev->device->devargs;
2150 struct rte_kvargs *kvlist;
2156 kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
2158 PMD_INIT_LOG(ERR, "invalid kvargs key\n");
2162 ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
2163 memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
2164 sizeof(ad->devargs.proto_xtr));
2166 ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
2167 &iavf_handle_proto_xtr_arg, &ad->devargs);
2171 ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
2172 &parse_u16, &ad->devargs.quanta_size);
2176 if (ad->devargs.quanta_size == 0)
2177 ad->devargs.quanta_size = 1024;
2179 if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
2180 ad->devargs.quanta_size & 0x40) {
2181 PMD_INIT_LOG(ERR, "invalid quanta size\n");
2186 rte_kvargs_free(kvlist);
2191 iavf_init_proto_xtr(struct rte_eth_dev *dev)
2193 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2194 struct iavf_adapter *ad =
2195 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2196 const struct iavf_proto_xtr_ol *xtr_ol;
2197 bool proto_xtr_enable = false;
2201 vf->proto_xtr = rte_zmalloc("vf proto xtr",
2202 vf->vsi_res->num_queue_pairs, 0);
2203 if (unlikely(!(vf->proto_xtr))) {
2204 PMD_DRV_LOG(ERR, "no memory for setting up proto_xtr's table");
2208 for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
2209 vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
2210 IAVF_PROTO_XTR_NONE ?
2211 ad->devargs.proto_xtr[i] :
2212 ad->devargs.proto_xtr_dflt;
2214 if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
2215 uint8_t type = vf->proto_xtr[i];
2217 iavf_proto_xtr_params[type].required = true;
2218 proto_xtr_enable = true;
2222 if (likely(!proto_xtr_enable))
2225 offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
2226 if (unlikely(offset == -1)) {
2228 "failed to extract protocol metadata, error %d",
2234 "proto_xtr metadata offset in mbuf is : %d",
2236 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;
2238 for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
2239 xtr_ol = &iavf_proto_xtr_params[i];
2241 uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);
2243 if (!xtr_ol->required)
2246 if (!(vf->supported_rxdid & BIT(rxdid))) {
2248 "rxdid[%u] is not supported in hardware",
2250 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2254 offset = rte_mbuf_dynflag_register(&xtr_ol->param);
2255 if (unlikely(offset == -1)) {
2257 "failed to register proto_xtr offload '%s', error %d",
2258 xtr_ol->param.name, -rte_errno);
2260 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2265 "proto_xtr offload '%s' offset in mbuf is : %d",
2266 xtr_ol->param.name, offset);
2267 *xtr_ol->ol_flag = 1ULL << offset;
2272 iavf_init_vf(struct rte_eth_dev *dev)
2275 struct iavf_adapter *adapter =
2276 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2277 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2278 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2282 err = iavf_parse_devargs(dev);
2284 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2288 err = iavf_set_mac_type(hw);
2290 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
2294 err = iavf_check_vf_reset_done(hw);
2296 PMD_INIT_LOG(ERR, "VF is still resetting");
2300 iavf_init_adminq_parameter(hw);
2301 err = iavf_init_adminq(hw);
2303 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
2307 vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
2309 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
2312 if (iavf_check_api_version(adapter) != 0) {
2313 PMD_INIT_LOG(ERR, "check_api version failed");
2317 bufsz = sizeof(struct virtchnl_vf_resource) +
2318 (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
2319 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
2321 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
2325 if (iavf_get_vf_resource(adapter) != 0) {
2326 PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
2329 /* Allocate memort for RSS info */
2330 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2331 vf->rss_key = rte_zmalloc("rss_key",
2332 vf->vf_res->rss_key_size, 0);
2334 PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
2337 vf->rss_lut = rte_zmalloc("rss_lut",
2338 vf->vf_res->rss_lut_size, 0);
2340 PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
2345 if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT)
2346 vf->lv_enabled = true;
2348 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2349 if (iavf_get_supported_rxdid(adapter) != 0) {
2350 PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
2355 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
2356 if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
2357 PMD_INIT_LOG(ERR, "failed to do get VLAN offload v2 capabilities");
2362 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
2363 bufsz = sizeof(struct virtchnl_qos_cap_list) +
2364 IAVF_MAX_TRAFFIC_CLASS *
2365 sizeof(struct virtchnl_qos_cap_elem);
2366 vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
2368 PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
2371 iavf_tm_conf_init(dev);
2374 iavf_init_proto_xtr(dev);
2378 rte_free(vf->rss_key);
2379 rte_free(vf->rss_lut);
2381 rte_free(vf->qos_cap);
2382 rte_free(vf->vf_res);
2385 rte_free(vf->aq_resp);
2387 iavf_shutdown_adminq(hw);
2393 iavf_uninit_vf(struct rte_eth_dev *dev)
2395 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2396 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2398 iavf_shutdown_adminq(hw);
2400 rte_free(vf->vf_res);
2404 rte_free(vf->aq_resp);
2407 rte_free(vf->qos_cap);
2410 rte_free(vf->rss_lut);
2412 rte_free(vf->rss_key);
2416 /* Enable default admin queue interrupt setting */
2418 iavf_enable_irq0(struct iavf_hw *hw)
2420 /* Enable admin queue interrupt trigger */
2421 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
2422 IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
2424 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2425 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
2426 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
2427 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2429 IAVF_WRITE_FLUSH(hw);
2433 iavf_disable_irq0(struct iavf_hw *hw)
2435 /* Disable all interrupt types */
2436 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
2437 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2438 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2439 IAVF_WRITE_FLUSH(hw);
2443 iavf_dev_interrupt_handler(void *param)
2445 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2446 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2448 iavf_disable_irq0(hw);
2450 iavf_handle_virtchnl_msg(dev);
2452 iavf_enable_irq0(hw);
2456 iavf_dev_alarm_handler(void *param)
2458 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2459 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2462 iavf_disable_irq0(hw);
2464 /* read out interrupt causes */
2465 icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
2467 if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
2468 PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
2469 iavf_handle_virtchnl_msg(dev);
2472 iavf_enable_irq0(hw);
2474 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2475 iavf_dev_alarm_handler, dev);
2479 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
2480 const struct rte_flow_ops **ops)
2485 *ops = &iavf_flow_ops;
2490 iavf_default_rss_disable(struct iavf_adapter *adapter)
2492 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2495 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2496 /* Set hena = 0 to ask PF to cleanup all existing RSS. */
2497 ret = iavf_set_hena(adapter, 0);
2499 /* It is a workaround, temporarily allow error to be
2500 * returned due to possible lack of PF handling for
2503 PMD_INIT_LOG(WARNING, "fail to disable default RSS,"
/* Initialize the iavf ethdev instance.
 *
 * NOTE(review): this excerpt is elided — error-path returns/gotos and some
 * closing braces between the visible lines are not shown here.
 *
 * Visible flow: wire up dev_ops and Rx/Tx burst/prepare functions; for
 * secondary processes only (re)select the burst functions and stop; for
 * the primary process copy PCI identity into the HW struct, init the VF,
 * allocate the MAC address table, register interrupt handling (or the
 * alarm-based fallback when WB_ON_ITR is offered), then init flow rules,
 * optional IPsec-crypto context, default-RSS disable and the watchdog.
 * The visible error paths free the MAC table and uninit the VF.
 */
2509 iavf_dev_init(struct rte_eth_dev *eth_dev)
2511 struct iavf_adapter *adapter =
2512 IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2513 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
2514 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2515 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2518 PMD_INIT_FUNC_TRACE();
2520 /* assign ops func pointer */
2521 eth_dev->dev_ops = &iavf_eth_dev_ops;
2522 eth_dev->rx_queue_count = iavf_dev_rxq_count;
2523 eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
2524 eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
2525 eth_dev->rx_pkt_burst = &iavf_recv_pkts;
2526 eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
2527 eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
2529 /* For secondary processes, we don't initialise any further as primary
2530 * has already done this work. Only check if we need a different RX
2533 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2534 iavf_set_rx_function(eth_dev);
2535 iavf_set_tx_function(eth_dev);
/* Primary process from here on: record PCI identity in the shared HW
 * struct so lower-level adminq code can address the device.
 */
2538 rte_eth_copy_pci_info(eth_dev, pci_dev);
2540 hw->vendor_id = pci_dev->id.vendor_id;
2541 hw->device_id = pci_dev->id.device_id;
2542 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2543 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2544 hw->bus.bus_id = pci_dev->addr.bus;
2545 hw->bus.device = pci_dev->addr.devid;
2546 hw->bus.func = pci_dev->addr.function;
2547 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
2548 hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2549 adapter->dev_data = eth_dev->data;
2550 adapter->stopped = 1;
2552 if (iavf_init_vf(eth_dev) != 0) {
2553 PMD_INIT_LOG(ERR, "Init vf failed");
2557 /* set default ptype table */
2558 iavf_set_default_ptype_table(eth_dev);
/* Room for the full unicast MAC filter table, not just one address. */
2561 eth_dev->data->mac_addrs = rte_zmalloc(
2562 "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
2563 if (!eth_dev->data->mac_addrs) {
2564 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
2565 " store MAC addresses",
2566 RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
2570 /* If the MAC address is not configured by host,
2571 * generate a random one.
2573 if (!rte_is_valid_assigned_ether_addr(
2574 (struct rte_ether_addr *)hw->mac.addr))
2575 rte_eth_random_addr(hw->mac.addr);
2576 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
2577 &eth_dev->data->mac_addrs[0]);
/* WB_ON_ITR capable VFs use a real interrupt; otherwise (else branch,
 * elided lines) an EAL alarm periodically polls the adminq instead.
 */
2579 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2580 /* register callback func to eal lib */
2581 rte_intr_callback_register(pci_dev->intr_handle,
2582 iavf_dev_interrupt_handler,
2585 /* enable uio intr after callback register */
2586 rte_intr_enable(pci_dev->intr_handle);
2588 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2589 iavf_dev_alarm_handler, eth_dev);
2592 /* configure and enable device interrupt */
2593 iavf_enable_irq0(hw);
2595 ret = iavf_flow_init(adapter);
2597 PMD_INIT_LOG(ERR, "Failed to initialize flow");
2601 /** Check if the IPsec Crypto offload is supported and create
2602 * security_ctx if it is.
2604 if (iavf_ipsec_crypto_supported(adapter)) {
2605 /* Initialize security_ctx only for primary process*/
2606 ret = iavf_security_ctx_create(adapter);
2608 PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
2612 ret = iavf_security_init(adapter);
2614 PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto resources");
/* RSS is disabled by default; the app enables it explicitly if wanted. */
2619 iavf_default_rss_disable(adapter);
2622 /* Start device watchdog */
2623 iavf_dev_watchdog_enable(adapter);
/* Error unwind (labels elided): release the MAC table and VF state. */
2629 rte_free(eth_dev->data->mac_addrs);
2630 eth_dev->data->mac_addrs = NULL;
2633 iavf_uninit_vf(eth_dev);
/* Close the device (primary process only).
 *
 * NOTE(review): excerpt is elided — early-return bodies, some closing
 * braces and the final return are not visible here.
 *
 * Visible flow: stop the port, flush and uninit flow rules, drop promisc
 * (kernel-PF workaround), shut down the adminq, tear down the interrupt
 * callback or the polling alarm, free QoS/RSS/resource allocations, and
 * restore bus mastering if the VF went through a VFLR reset.
 */
2639 iavf_dev_close(struct rte_eth_dev *dev)
2641 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2642 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2643 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2644 struct iavf_adapter *adapter =
2645 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2646 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
/* Secondary processes must not tear down shared device state. */
2649 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2652 ret = iavf_dev_stop(dev);
2654 iavf_flow_flush(dev, NULL);
2655 iavf_flow_uninit(adapter);
2658 * disable promiscuous mode before reset vf
2659 * it is a workaround solution when work with kernel driver
2660 * and it is not the normal way
2662 if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
2663 iavf_config_promisc(adapter, false, false);
2665 iavf_shutdown_adminq(hw);
/* Mirror of the init-time choice: interrupt callback vs. polling alarm. */
2666 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2667 /* disable uio intr before callback unregister */
2668 rte_intr_disable(intr_handle);
2670 /* unregister callback func from eal lib */
2671 rte_intr_callback_unregister(intr_handle,
2672 iavf_dev_interrupt_handler, dev);
2674 rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
2676 iavf_disable_irq0(hw);
2678 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
2679 iavf_tm_conf_uninit(dev);
/* Free RSS tables only if the PF offered the RSS_PF capability. */
2681 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2683 rte_free(vf->rss_lut);
2687 rte_free(vf->rss_key);
2692 rte_free(vf->vf_res);
2696 rte_free(vf->aq_resp);
2700 * If the VF is reset via VFLR, the device will be knocked out of bus
2701 * master mode, and the driver will fail to recover from the reset. Fix
2702 * this by enabling bus mastering after every reset. In a non-VFLR case,
2703 * the bus master bit will not be disabled, and this call will have no
2706 if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
2707 vf->vf_reset = false;
2709 /* disable watchdog */
2710 iavf_dev_watchdog_disable(adapter);
/* ethdev uninit hook: secondary processes do nothing (early return,
 * elided); the primary process delegates full teardown to
 * iavf_dev_close(). Return statements are not visible in this excerpt.
 */
2716 iavf_dev_uninit(struct rte_eth_dev *dev)
2718 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2721 iavf_dev_close(dev);
2727 * Reset VF device only to re-initialize resources in PMD layer
/* Implemented as uninit followed by a fresh init; the elided lines
 * declare `ret` and propagate a non-zero uninit failure to the caller.
 */
2730 iavf_dev_reset(struct rte_eth_dev *dev)
2734 ret = iavf_dev_uninit(dev);
2738 return iavf_dev_init(dev);
/* rte_kvargs_process() callback for the "cap" devarg: accepts only the
 * exact value "dcf" (return values are elided in this excerpt; the
 * strcmp()!=0 branch is the mismatch path).
 */
2742 iavf_dcf_cap_check_handler(__rte_unused const char *key,
2743 const char *value, __rte_unused void *opaque)
2745 if (strcmp(value, "dcf"))
/* Check whether the device was launched with the "cap=dcf" devarg, in
 * which case this PMD must step aside for the DCF driver.
 *
 * NOTE(review): `ret` declaration, early returns and goto labels are
 * elided; only the parse/lookup/process skeleton is visible. kvlist is
 * released via rte_kvargs_free() on the exit path.
 */
2752 iavf_dcf_cap_selected(struct rte_devargs *devargs)
2754 struct rte_kvargs *kvlist;
2755 const char *key = "cap";
2758 if (devargs == NULL)
2761 kvlist = rte_kvargs_parse(devargs->args, NULL);
2765 if (!rte_kvargs_count(kvlist, key))
2768 /* dcf capability selected when there's a key-value pair: cap=dcf */
2769 if (rte_kvargs_process(kvlist, key,
2770 iavf_dcf_cap_check_handler, NULL) < 0)
2776 rte_kvargs_free(kvlist);
/* PCI probe hook: skip the device when "cap=dcf" is requested (early
 * return elided) so the DCF driver can claim it; otherwise create the
 * ethdev via the generic PCI probe helper with iavf_dev_init.
 */
2780 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2781 struct rte_pci_device *pci_dev)
2783 if (iavf_dcf_cap_selected(pci_dev->device.devargs))
2786 return rte_eth_dev_pci_generic_probe(pci_dev,
2787 sizeof(struct iavf_adapter), iavf_dev_init);
/* PCI remove hook: destroy the ethdev, invoking iavf_dev_uninit. */
2790 static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
2792 return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
/* Adaptive virtual function driver struct */
/* NEED_MAPPING: BARs must be mapped before probe; INTR_LSC: the driver
 * reports link-status-change interrupts to the application.
 */
2796 static struct rte_pci_driver rte_iavf_pmd = {
2797 .id_table = pci_id_iavf_map,
2798 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2799 .probe = eth_iavf_pci_probe,
2800 .remove = eth_iavf_pci_remove,
/* Driver registration: PCI driver + ID table, kernel-module dependency,
 * documented devargs, and per-component log types. The Rx/Tx data-path
 * loggers exist only in debug builds (closing #endif lines are elided
 * in this excerpt).
 */
2803 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
2804 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
2805 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
2806 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
2807 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
2808 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
2809 #ifdef RTE_ETHDEV_DEBUG_RX
2810 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
2812 #ifdef RTE_ETHDEV_DEBUG_TX
2813 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);