1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
19 #include <rte_alarm.h>
20 #include <rte_atomic.h>
22 #include <rte_ether.h>
23 #include <ethdev_driver.h>
24 #include <ethdev_pci.h>
25 #include <rte_malloc.h>
26 #include <rte_memzone.h>
30 #include "iavf_rxtx.h"
31 #include "iavf_generic_flow.h"
32 #include "rte_pmd_iavf.h"
33 #include "iavf_ipsec_crypto.h"
36 #define IAVF_PROTO_XTR_ARG "proto_xtr"
37 #define IAVF_QUANTA_SIZE_ARG "quanta_size"
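/*
 * Illustrative devargs usage (example values, not defaults):
 *   -a 18:01.0,proto_xtr=[(0,2-3):ipv6,4:tcp],quanta_size=2048
 * proto_xtr selects per-queue protocol extraction (parsed below);
 * quanta_size must lie in [256, 4096] and is further checked in
 * iavf_parse_devargs().
 */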
39 static const char * const iavf_valid_args[] = {
45 static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
46 .name = "intel_pmd_dynfield_proto_xtr_metadata",
47 .size = sizeof(uint32_t),
48 .align = __alignof__(uint32_t),
52 struct iavf_proto_xtr_ol {
53 const struct rte_mbuf_dynflag param;
58 static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
59 [IAVF_PROTO_XTR_VLAN] = {
60 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
61 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
62 [IAVF_PROTO_XTR_IPV4] = {
63 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
64 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
65 [IAVF_PROTO_XTR_IPV6] = {
66 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
67 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
68 [IAVF_PROTO_XTR_IPV6_FLOW] = {
69 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
70 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
71 [IAVF_PROTO_XTR_TCP] = {
72 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
73 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
74 [IAVF_PROTO_XTR_IP_OFFSET] = {
75 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
76 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
77 [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
79 .name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
81 &rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
84 static int iavf_dev_configure(struct rte_eth_dev *dev);
85 static int iavf_dev_start(struct rte_eth_dev *dev);
86 static int iavf_dev_stop(struct rte_eth_dev *dev);
87 static int iavf_dev_close(struct rte_eth_dev *dev);
88 static int iavf_dev_reset(struct rte_eth_dev *dev);
89 static int iavf_dev_info_get(struct rte_eth_dev *dev,
90 struct rte_eth_dev_info *dev_info);
91 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
92 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
93 struct rte_eth_stats *stats);
94 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
95 static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
96 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
97 struct rte_eth_xstat *xstats, unsigned int n);
98 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
99 struct rte_eth_xstat_name *xstats_names,
101 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
102 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
103 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
104 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
105 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
106 struct rte_ether_addr *addr,
109 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
110 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
111 uint16_t vlan_id, int on);
112 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
113 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
114 struct rte_eth_rss_reta_entry64 *reta_conf,
116 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
117 struct rte_eth_rss_reta_entry64 *reta_conf,
119 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
120 struct rte_eth_rss_conf *rss_conf);
121 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
122 struct rte_eth_rss_conf *rss_conf);
123 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
124 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
125 struct rte_ether_addr *mac_addr);
126 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
128 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
130 static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
131 const struct rte_flow_ops **ops);
132 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
133 struct rte_ether_addr *mc_addrs,
134 uint32_t mc_addrs_num);
135 static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
137 static const struct rte_pci_id pci_id_iavf_map[] = {
138 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
139 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
140 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
141 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
142 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
143 { .vendor_id = 0, /* sentinel */ },
146 struct rte_iavf_xstats_name_off {
147 char name[RTE_ETH_XSTATS_NAME_SIZE];
151 #define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
152 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
153 {"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
154 {"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
155 {"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
156 {"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
157 {"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
158 {"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
159 rx_unknown_protocol)},
160 {"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
161 {"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
162 {"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
163 {"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
164 {"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
165 {"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
167 {"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
168 {"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
169 {"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
170 {"inline_ipsec_crypto_ierrors_sad_lookup",
171 _OFF_OF(ips_stats.ierrors.sad_miss)},
172 {"inline_ipsec_crypto_ierrors_not_processed",
173 _OFF_OF(ips_stats.ierrors.not_processed)},
174 {"inline_ipsec_crypto_ierrors_icv_fail",
175 _OFF_OF(ips_stats.ierrors.icv_check)},
176 {"inline_ipsec_crypto_ierrors_length",
177 _OFF_OF(ips_stats.ierrors.ipsec_length)},
178 {"inline_ipsec_crypto_ierrors_misc",
179 _OFF_OF(ips_stats.ierrors.misc)},
183 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
184 sizeof(rte_iavf_stats_strings[0]))
186 static const struct eth_dev_ops iavf_eth_dev_ops = {
187 .dev_configure = iavf_dev_configure,
188 .dev_start = iavf_dev_start,
189 .dev_stop = iavf_dev_stop,
190 .dev_close = iavf_dev_close,
191 .dev_reset = iavf_dev_reset,
192 .dev_infos_get = iavf_dev_info_get,
193 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
194 .link_update = iavf_dev_link_update,
195 .stats_get = iavf_dev_stats_get,
196 .stats_reset = iavf_dev_stats_reset,
197 .xstats_get = iavf_dev_xstats_get,
198 .xstats_get_names = iavf_dev_xstats_get_names,
199 .xstats_reset = iavf_dev_xstats_reset,
200 .promiscuous_enable = iavf_dev_promiscuous_enable,
201 .promiscuous_disable = iavf_dev_promiscuous_disable,
202 .allmulticast_enable = iavf_dev_allmulticast_enable,
203 .allmulticast_disable = iavf_dev_allmulticast_disable,
204 .mac_addr_add = iavf_dev_add_mac_addr,
205 .mac_addr_remove = iavf_dev_del_mac_addr,
206 .set_mc_addr_list = iavf_set_mc_addr_list,
207 .vlan_filter_set = iavf_dev_vlan_filter_set,
208 .vlan_offload_set = iavf_dev_vlan_offload_set,
209 .rx_queue_start = iavf_dev_rx_queue_start,
210 .rx_queue_stop = iavf_dev_rx_queue_stop,
211 .tx_queue_start = iavf_dev_tx_queue_start,
212 .tx_queue_stop = iavf_dev_tx_queue_stop,
213 .rx_queue_setup = iavf_dev_rx_queue_setup,
214 .rx_queue_release = iavf_dev_rx_queue_release,
215 .tx_queue_setup = iavf_dev_tx_queue_setup,
216 .tx_queue_release = iavf_dev_tx_queue_release,
217 .mac_addr_set = iavf_dev_set_default_mac_addr,
218 .reta_update = iavf_dev_rss_reta_update,
219 .reta_query = iavf_dev_rss_reta_query,
220 .rss_hash_update = iavf_dev_rss_hash_update,
221 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
222 .rxq_info_get = iavf_dev_rxq_info_get,
223 .txq_info_get = iavf_dev_txq_info_get,
224 .mtu_set = iavf_dev_mtu_set,
225 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
226 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
227 .flow_ops_get = iavf_dev_flow_ops_get,
228 .tx_done_cleanup = iavf_dev_tx_done_cleanup,
229 .get_monitor_addr = iavf_get_monitor_addr,
230 .tm_ops_get = iavf_tm_ops_get,
234 iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
240 *(const void **)arg = &iavf_tm_ops;
247 iavf_vfr_inprogress(struct iavf_hw *hw)
251 if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
252 IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
253 VIRTCHNL_VFR_INPROGRESS)
257 PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
264 iavf_dev_watchdog(void *cb_arg)
266 struct iavf_adapter *adapter = cb_arg;
267 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
268 int vfr_inprogress = 0, rc = 0;
270 /* check if watchdog has been disabled since last call */
271 if (!adapter->vf.watchdog_enabled)
274 /* If in reset then poll vfr_inprogress register for completion */
275 if (adapter->vf.vf_reset) {
276 vfr_inprogress = iavf_vfr_inprogress(hw);
278 if (!vfr_inprogress) {
279 PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
280 adapter->vf.eth_dev->data->name);
281 adapter->vf.vf_reset = false;
283 /* If not in reset then poll vfr_inprogress register for VFLR event */
285 vfr_inprogress = iavf_vfr_inprogress(hw);
287 if (vfr_inprogress) {
289 "VF \"%s\" reset event detected by watchdog",
290 adapter->vf.eth_dev->data->name);
292 /* enter reset state with VFLR event */
293 adapter->vf.vf_reset = true;
295 rte_eth_dev_callback_process(adapter->vf.eth_dev,
296 RTE_ETH_EVENT_INTR_RESET, NULL);
300 /* re-alarm watchdog */
301 rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
302 &iavf_dev_watchdog, cb_arg);
305 PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
306 adapter->vf.eth_dev->data->name);
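/*
 * The watchdog re-arms itself via rte_eal_alarm_set() every
 * IAVF_DEV_WATCHDOG_PERIOD microseconds. Disabling it only clears
 * vf.watchdog_enabled; the callback then stops re-arming on its next run.
 */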
310 iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
312 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
313 PMD_DRV_LOG(INFO, "Enabling device watchdog");
314 adapter->vf.watchdog_enabled = true;
315 if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
316 &iavf_dev_watchdog, (void *)adapter))
317 PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
322 iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
324 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
325 PMD_DRV_LOG(INFO, "Disabling device watchdog");
326 adapter->vf.watchdog_enabled = false;
331 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
332 struct rte_ether_addr *mc_addrs,
333 uint32_t mc_addrs_num)
335 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
336 struct iavf_adapter *adapter =
337 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
340 if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
342 "can't add more than a limited number (%u) of addresses.",
343 (uint32_t)IAVF_NUM_MACADDR_MAX);
347 /* flush previous addresses */
348 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
354 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
357 /* if adding mac address list fails, should add the previous
360 ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
361 vf->mc_addrs_num, true);
365 vf->mc_addrs_num = mc_addrs_num;
367 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
374 iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
376 static const uint64_t map_hena_rss[] = {
378 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
379 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
380 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
381 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
382 [IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
383 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
384 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
385 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
386 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
387 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
388 [IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
389 RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
390 [IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
391 RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
392 [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
395 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
396 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
397 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
398 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
399 [IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
400 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
401 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
402 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
403 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
404 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
405 [IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
406 RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
407 [IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
408 RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
409 [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
412 [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
415 const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
416 RTE_ETH_RSS_NONFRAG_IPV4_TCP |
417 RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
418 RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
419 RTE_ETH_RSS_FRAG_IPV4;
421 const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
422 RTE_ETH_RSS_NONFRAG_IPV6_TCP |
423 RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
424 RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
425 RTE_ETH_RSS_FRAG_IPV6;
427 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
428 uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
432 ret = iavf_get_hena_caps(adapter, &caps);
435 * RSS offload type configuration is not a mandatory feature
436 * for the VF, so just log a warning and return.
439 "fail to get RSS offload type caps, ret: %d", ret);
444 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
445 * generalizations of all other IPv4 and IPv6 RSS types.
447 if (rss_hf & RTE_ETH_RSS_IPV4)
450 if (rss_hf & RTE_ETH_RSS_IPV6)
453 RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
455 for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
456 uint64_t bit = BIT_ULL(i);
458 if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
459 valid_rss_hf |= map_hena_rss[i];
464 ret = iavf_set_hena(adapter, hena);
467 * RSS offload type configuration is not a mandatory feature
468 * for the VF, so just log a warning and return.
471 "fail to set RSS offload types, ret: %d", ret);
475 if (valid_rss_hf & ipv4_rss)
476 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
478 if (valid_rss_hf & ipv6_rss)
479 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
481 if (rss_hf & ~valid_rss_hf)
482 PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
483 rss_hf & ~valid_rss_hf);
485 vf->rss_hf = valid_rss_hf;
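/*
 * Worked example (illustrative): rss_hf = RTE_ETH_RSS_IPV4 is first
 * expanded to the ipv4_rss set above (UDP/TCP/SCTP/OTHER/FRAG); every
 * expanded type whose PCTYPE bit is present in caps then contributes its
 * bit to hena, and valid_rss_hf reports what was actually programmed.
 */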
489 iavf_init_rss(struct iavf_adapter *adapter)
491 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
492 struct rte_eth_rss_conf *rss_conf;
496 rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
497 nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
498 vf->max_rss_qregion);
500 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
501 PMD_DRV_LOG(DEBUG, "RSS is not supported");
505 /* configure RSS key */
506 if (!rss_conf->rss_key) {
507 /* Generate a random default hash key */
508 for (i = 0; i < vf->vf_res->rss_key_size; i++)
509 vf->rss_key[i] = (uint8_t)rte_rand();
511 rte_memcpy(vf->rss_key, rss_conf->rss_key,
512 RTE_MIN(rss_conf->rss_key_len,
513 vf->vf_res->rss_key_size));
515 /* init RSS LUT table */
516 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
521 /* send virtchnl ops to configure RSS */
522 ret = iavf_configure_rss_lut(adapter);
525 ret = iavf_configure_rss_key(adapter);
529 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
530 /* Set RSS hash configuration based on rss_conf->rss_hf. */
531 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
533 PMD_DRV_LOG(ERR, "fail to set default RSS");
537 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
544 iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
546 struct iavf_adapter *ad =
547 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
548 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
551 ret = iavf_request_queues(dev, num);
553 PMD_DRV_LOG(ERR, "request queues from PF failed");
556 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
557 vf->vsi_res->num_queue_pairs, num);
559 ret = iavf_dev_reset(dev);
561 PMD_DRV_LOG(ERR, "vf reset failed");
569 iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
571 struct iavf_adapter *adapter =
572 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
573 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
576 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
579 enable = !!(dev->data->dev_conf.txmode.offloads &
580 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
581 iavf_config_vlan_insert_v2(adapter, enable);
587 iavf_dev_init_vlan(struct rte_eth_dev *dev)
591 err = iavf_dev_vlan_offload_set(dev,
592 RTE_ETH_VLAN_STRIP_MASK |
593 RTE_ETH_QINQ_STRIP_MASK |
594 RTE_ETH_VLAN_FILTER_MASK |
595 RTE_ETH_VLAN_EXTEND_MASK);
597 PMD_DRV_LOG(ERR, "Failed to update vlan offload");
601 err = iavf_dev_vlan_insert_set(dev);
603 PMD_DRV_LOG(ERR, "Failed to update vlan insertion");
609 iavf_dev_configure(struct rte_eth_dev *dev)
611 struct iavf_adapter *ad =
612 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
613 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
614 uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
615 dev->data->nb_tx_queues);
618 ad->rx_bulk_alloc_allowed = true;
619 /* Initialize to TRUE. If any Rx queue doesn't meet the
620 * vector Rx/Tx preconditions, it will be reset.
622 ad->rx_vec_allowed = true;
623 ad->tx_vec_allowed = true;
625 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
626 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
628 /* Large VF setting */
629 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
630 if (!(vf->vf_res->vf_cap_flags &
631 VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
632 PMD_DRV_LOG(ERR, "large VF is not supported");
636 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
637 PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
638 IAVF_MAX_NUM_QUEUES_LV);
642 ret = iavf_queues_req_reset(dev, num_queue_pairs);
646 ret = iavf_get_max_rss_queue_region(ad);
648 PMD_INIT_LOG(ERR, "get max rss queue region failed");
652 vf->lv_enabled = true;
654 /* If large VF is already enabled, disable it and release the
655 * redundant queue resources. Otherwise, check whether there are
656 * enough queue pairs; if not, request them from the PF.
658 if (vf->lv_enabled ||
659 num_queue_pairs > vf->vsi_res->num_queue_pairs) {
660 ret = iavf_queues_req_reset(dev, num_queue_pairs);
664 vf->lv_enabled = false;
666 /* if large VF is not required, use default rss queue region */
667 vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
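/*
 * Example (illustrative): configuring more queue pairs than
 * IAVF_MAX_NUM_QUEUES_DFLT takes the large-VF path above, which requests
 * the extra queues from the PF and resets the VF; otherwise the default
 * RSS queue region is kept.
 */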
670 ret = iavf_dev_init_vlan(dev);
672 PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
674 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
675 if (iavf_init_rss(ad) != 0) {
676 PMD_DRV_LOG(ERR, "configure rss failed");
684 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
686 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
687 struct rte_eth_dev_data *dev_data = dev->data;
688 uint16_t buf_size, max_pkt_len;
689 uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
691 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
693 /* Calculate the maximum packet length allowed */
694 max_pkt_len = RTE_MIN((uint32_t)
695 rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
698 /* Check if maximum packet length is set correctly. */
699 if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
700 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
701 PMD_DRV_LOG(ERR, "maximum packet length must be "
702 "larger than %u and smaller than %u",
703 (uint32_t)IAVF_ETH_MAX_LEN,
704 (uint32_t)IAVF_FRAME_SIZE_MAX);
708 rxq->max_pkt_len = max_pkt_len;
709 if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
710 rxq->max_pkt_len > buf_size) {
711 dev_data->scattered_rx = 1;
713 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
714 IAVF_WRITE_FLUSH(hw);
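/*
 * Example (illustrative): with a 2048-byte mbuf data room, buf_size is
 * 2048 - RTE_PKTMBUF_HEADROOM; if max_pkt_len exceeds that (or the
 * SCATTER offload is requested), scattered_rx is enabled so one frame can
 * span multiple Rx buffers.
 */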
720 iavf_init_queues(struct rte_eth_dev *dev)
722 struct iavf_rx_queue **rxq =
723 (struct iavf_rx_queue **)dev->data->rx_queues;
724 int i, ret = IAVF_SUCCESS;
726 for (i = 0; i < dev->data->nb_rx_queues; i++) {
727 if (!rxq[i] || !rxq[i]->q_set)
729 ret = iavf_init_rxq(dev, rxq[i]);
730 if (ret != IAVF_SUCCESS)
733 /* set rx/tx function to vector/scatter/single-segment
734 * according to parameters
736 iavf_set_rx_function(dev);
737 iavf_set_tx_function(dev);
742 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
743 struct rte_intr_handle *intr_handle)
745 struct iavf_adapter *adapter =
746 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
747 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
748 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
749 struct iavf_qv_map *qv_map;
750 uint16_t interval, i;
753 if (rte_intr_cap_multiple(intr_handle) &&
754 dev->data->dev_conf.intr_conf.rxq) {
755 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
759 if (rte_intr_dp_is_en(intr_handle)) {
760 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
761 dev->data->nb_rx_queues)) {
762 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
763 dev->data->nb_rx_queues);
769 qv_map = rte_zmalloc("qv_map",
770 dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
772 PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
773 dev->data->nb_rx_queues);
774 goto qv_map_alloc_err;
777 if (!dev->data->dev_conf.intr_conf.rxq ||
778 !rte_intr_dp_is_en(intr_handle)) {
779 /* Rx interrupt disabled, Map interrupt only for writeback */
781 if (vf->vf_res->vf_cap_flags &
782 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
783 /* If WB_ON_ITR is supported, enable it */
784 vf->msix_base = IAVF_RX_VEC_START;
785 /* Set the ITR for index zero, to 2us to make sure that
786 * we leave time for aggregation to occur, but don't
787 * increase latency dramatically.
790 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
791 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
792 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
793 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
794 /* debug - check for success! the return value
795 * should be 2, offset is 0x2800
797 /* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
799 /* If the WB_ON_ITR offload flag is not set, an interrupt
800 * is needed for descriptor write-back.
802 vf->msix_base = IAVF_MISC_VEC_ID;
804 /* set ITR to default */
805 interval = iavf_calc_itr_interval(
806 IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
807 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
808 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
809 (IAVF_ITR_INDEX_DEFAULT <<
810 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
812 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
814 IAVF_WRITE_FLUSH(hw);
815 /* map all queues to the same interrupt */
816 for (i = 0; i < dev->data->nb_rx_queues; i++) {
817 qv_map[i].queue_id = i;
818 qv_map[i].vector_id = vf->msix_base;
822 if (!rte_intr_allow_others(intr_handle)) {
824 vf->msix_base = IAVF_MISC_VEC_ID;
825 for (i = 0; i < dev->data->nb_rx_queues; i++) {
826 qv_map[i].queue_id = i;
827 qv_map[i].vector_id = vf->msix_base;
828 rte_intr_vec_list_index_set(intr_handle,
829 i, IAVF_MISC_VEC_ID);
833 "vector %u are mapping to all Rx queues",
836 /* If Rx interrupt is required, and we can use
837 * multi interrupts, then the vec is from 1
840 RTE_MIN(rte_intr_nb_efd_get(intr_handle),
841 (uint16_t)(vf->vf_res->max_vectors - 1));
842 vf->msix_base = IAVF_RX_VEC_START;
843 vec = IAVF_RX_VEC_START;
844 for (i = 0; i < dev->data->nb_rx_queues; i++) {
845 qv_map[i].queue_id = i;
846 qv_map[i].vector_id = vec;
847 rte_intr_vec_list_index_set(intr_handle,
849 if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
850 vec = IAVF_RX_VEC_START;
854 "%u vectors are mapping to %u Rx queues",
855 vf->nb_msix, dev->data->nb_rx_queues);
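/*
 * Illustrative mapping (assuming IAVF_RX_VEC_START == 1): with
 * vf->nb_msix = 4 and 8 Rx queues, the loop above assigns queues 0..7 to
 * vectors 1,2,3,4,1,2,3,4 in round-robin order.
 */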
859 if (!vf->lv_enabled) {
860 if (iavf_config_irq_map(adapter)) {
861 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
862 goto config_irq_map_err;
865 uint16_t num_qv_maps = dev->data->nb_rx_queues;
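/* Large VF: send the queue-vector maps in chunks of
 * IAVF_IRQ_MAP_NUM_PER_BUF so that each virtchnl message fits in the 4K
 * adminq buffer, mirroring the configure-queues loop in iavf_dev_start().
 */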
868 while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
869 if (iavf_config_irq_map_lv(adapter,
870 IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
871 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
872 goto config_irq_map_err;
874 num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
875 index += IAVF_IRQ_MAP_NUM_PER_BUF;
878 if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
879 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
880 goto config_irq_map_err;
886 rte_free(vf->qv_map);
890 rte_intr_vec_list_free(intr_handle);
896 iavf_start_queues(struct rte_eth_dev *dev)
898 struct iavf_rx_queue *rxq;
899 struct iavf_tx_queue *txq;
902 for (i = 0; i < dev->data->nb_tx_queues; i++) {
903 txq = dev->data->tx_queues[i];
904 if (txq->tx_deferred_start)
906 if (iavf_dev_tx_queue_start(dev, i) != 0) {
907 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
912 for (i = 0; i < dev->data->nb_rx_queues; i++) {
913 rxq = dev->data->rx_queues[i];
914 if (rxq->rx_deferred_start)
916 if (iavf_dev_rx_queue_start(dev, i) != 0) {
917 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
926 iavf_dev_start(struct rte_eth_dev *dev)
928 struct iavf_adapter *adapter =
929 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
930 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
931 struct rte_intr_handle *intr_handle = dev->intr_handle;
932 uint16_t num_queue_pairs;
935 PMD_INIT_FUNC_TRACE();
937 adapter->stopped = 0;
939 vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
940 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
941 dev->data->nb_tx_queues);
942 num_queue_pairs = vf->num_queue_pairs;
944 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
945 if (iavf_get_qos_cap(adapter)) {
946 PMD_INIT_LOG(ERR, "Failed to get qos capability");
950 if (iavf_init_queues(dev) != 0) {
951 PMD_DRV_LOG(ERR, "failed to do Queue init");
955 if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
956 PMD_DRV_LOG(WARNING, "configure quanta size failed");
958 /* If needed, send the configure-queues message multiple times to keep
959 * the adminq buffer length within the 4K limit.
961 while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
962 if (iavf_configure_queues(adapter,
963 IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
964 PMD_DRV_LOG(ERR, "configure queues failed");
967 num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
968 index += IAVF_CFG_Q_NUM_PER_BUF;
971 if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
972 PMD_DRV_LOG(ERR, "configure queues failed");
976 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
977 PMD_DRV_LOG(ERR, "configure irq failed");
980 /* re-enable the interrupt, because the efd assignment may have changed */
981 if (dev->data->dev_conf.intr_conf.rxq != 0) {
982 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
983 rte_intr_disable(intr_handle);
984 rte_intr_enable(intr_handle);
987 /* Set all mac addrs */
988 iavf_add_del_all_mac_addr(adapter, true);
990 /* Set all multicast addresses */
991 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
994 if (iavf_start_queues(dev) != 0) {
995 PMD_DRV_LOG(ERR, "enable queues failed");
1002 iavf_add_del_all_mac_addr(adapter, false);
1008 iavf_dev_stop(struct rte_eth_dev *dev)
1010 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1011 struct iavf_adapter *adapter =
1012 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1013 struct rte_intr_handle *intr_handle = dev->intr_handle;
1015 PMD_INIT_FUNC_TRACE();
1017 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
1018 dev->data->dev_conf.intr_conf.rxq != 0)
1019 rte_intr_disable(intr_handle);
1021 if (adapter->stopped == 1)
1024 iavf_stop_queues(dev);
1026 /* Disable the interrupt for Rx */
1027 rte_intr_efd_disable(intr_handle);
1028 /* Rx interrupt vector mapping free */
1029 rte_intr_vec_list_free(intr_handle);
1031 /* remove all mac addrs */
1032 iavf_add_del_all_mac_addr(adapter, false);
1034 /* remove all multicast addresses */
1035 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
1038 /* free all resources related to the iAVF security device context */
1039 iavf_security_ctx_destroy(adapter);
1041 adapter->stopped = 1;
1042 dev->data->dev_started = 0;
1048 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1050 struct iavf_adapter *adapter =
1051 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1052 struct iavf_info *vf = &adapter->vf;
1054 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
1055 dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
1056 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
1057 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
1058 dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
1059 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1060 dev_info->hash_key_size = vf->vf_res->rss_key_size;
1061 dev_info->reta_size = vf->vf_res->rss_lut_size;
1062 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
1063 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
1064 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1065 dev_info->rx_offload_capa =
1066 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
1067 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
1068 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1069 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1070 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
1071 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1072 RTE_ETH_RX_OFFLOAD_SCATTER |
1073 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1074 RTE_ETH_RX_OFFLOAD_RSS_HASH;
1076 dev_info->tx_offload_capa =
1077 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1078 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
1079 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1080 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1081 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1082 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
1083 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1084 RTE_ETH_TX_OFFLOAD_TCP_TSO |
1085 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1086 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
1087 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
1088 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
1089 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
1090 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1092 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
1093 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
1095 if (iavf_ipsec_crypto_supported(adapter)) {
1096 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1097 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1100 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1101 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
1106 dev_info->default_txconf = (struct rte_eth_txconf) {
1107 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
1108 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
1112 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1113 .nb_max = IAVF_MAX_RING_DESC,
1114 .nb_min = IAVF_MIN_RING_DESC,
1115 .nb_align = IAVF_ALIGN_RING_DESC,
1118 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1119 .nb_max = IAVF_MAX_RING_DESC,
1120 .nb_min = IAVF_MIN_RING_DESC,
1121 .nb_align = IAVF_ALIGN_RING_DESC,
1127 static const uint32_t *
1128 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1130 static const uint32_t ptypes[] = {
1132 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1135 RTE_PTYPE_L4_NONFRAG,
1145 iavf_dev_link_update(struct rte_eth_dev *dev,
1146 __rte_unused int wait_to_complete)
1148 struct rte_eth_link new_link;
1149 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1151 memset(&new_link, 0, sizeof(new_link));
1153 /* Only read the link status stored in the VF; it is updated
1154 * when a LINK_CHANGE event is received from the PF over virtchnl.
1156 switch (vf->link_speed) {
1158 new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1161 new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1164 new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1167 new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1170 new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1173 new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1176 new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1179 new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1182 new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1185 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1189 new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1190 new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
1192 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1193 RTE_ETH_LINK_SPEED_FIXED);
1195 return rte_eth_linkstatus_set(dev, &new_link);
1199 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1201 struct iavf_adapter *adapter =
1202 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1203 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1205 return iavf_config_promisc(adapter,
1206 true, vf->promisc_multicast_enabled);
1210 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1212 struct iavf_adapter *adapter =
1213 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1214 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1216 return iavf_config_promisc(adapter,
1217 false, vf->promisc_multicast_enabled);
1221 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1223 struct iavf_adapter *adapter =
1224 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1225 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1227 return iavf_config_promisc(adapter,
1228 vf->promisc_unicast_enabled, true);
1232 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1234 struct iavf_adapter *adapter =
1235 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1236 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1238 return iavf_config_promisc(adapter,
1239 vf->promisc_unicast_enabled, false);
1243 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
1244 __rte_unused uint32_t index,
1245 __rte_unused uint32_t pool)
1247 struct iavf_adapter *adapter =
1248 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1249 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1252 if (rte_is_zero_ether_addr(addr)) {
1253 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1257 err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
1259 PMD_DRV_LOG(ERR, "fail to add MAC address");
1269 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1271 struct iavf_adapter *adapter =
1272 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1273 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1274 struct rte_ether_addr *addr;
1277 addr = &dev->data->mac_addrs[index];
1279 err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
1281 PMD_DRV_LOG(ERR, "fail to delete MAC address");
1287 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1289 struct iavf_adapter *adapter =
1290 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1291 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1294 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1295 err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
1301 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1304 err = iavf_add_del_vlan(adapter, vlan_id, on);
1311 iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1313 struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1314 struct iavf_adapter *adapter =
1315 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1319 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1320 if (vfc->ids[i] == 0)
1324 for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1326 iavf_add_del_vlan_v2(adapter,
1327 64 * i + j, enable);
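/*
 * vfc->ids is a bitmap: bit j of 64-bit word i being set means VLAN ID
 * (64 * i + j) has a filter, e.g. (illustrative) bit 5 of ids[1] set
 * corresponds to VLAN ID 69.
 */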
1333 iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1335 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1336 struct iavf_adapter *adapter =
1337 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1341 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1342 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
1344 iavf_iterate_vlan_filters_v2(dev, enable);
1347 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1348 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1350 err = iavf_config_vlan_strip_v2(adapter, enable);
1351 /* If not supported, stripping is already disabled by the PF */
1352 if (err == -ENOTSUP && !enable)
1362 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1364 struct iavf_adapter *adapter =
1365 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1366 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1367 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1370 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1371 return iavf_dev_vlan_offload_set_v2(dev, mask);
1373 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1376 /* VLAN stripping setting */
1377 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1378 /* Enable or disable VLAN stripping */
1379 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1380 err = iavf_enable_vlan_strip(adapter);
1382 err = iavf_disable_vlan_strip(adapter);
1391 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
1392 struct rte_eth_rss_reta_entry64 *reta_conf,
1395 struct iavf_adapter *adapter =
1396 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1397 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1399 uint16_t i, idx, shift;
1402 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1405 if (reta_size != vf->vf_res->rss_lut_size) {
1406 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1407 "(%d) doesn't match the number of hardware can "
1408 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1412 lut = rte_zmalloc("rss_lut", reta_size, 0);
1414 PMD_DRV_LOG(ERR, "No memory can be allocated");
1418 /* save the old lut so it can be restored on failure */
1418 rte_memcpy(lut, vf->rss_lut, reta_size);
1420 for (i = 0; i < reta_size; i++) {
1421 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1422 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1423 if (reta_conf[idx].mask & (1ULL << shift))
1424 vf->rss_lut[i] = reta_conf[idx].reta[shift];
1428 /* send virtchnl ops to configure RSS */
1429 ret = iavf_configure_rss_lut(adapter);
1430 if (ret) /* revert to the old lut saved above */
1431 rte_memcpy(vf->rss_lut, lut, reta_size);
1438 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
1439 struct rte_eth_rss_reta_entry64 *reta_conf,
1442 struct iavf_adapter *adapter =
1443 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1444 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1445 uint16_t i, idx, shift;
1447 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1450 if (reta_size != vf->vf_res->rss_lut_size) {
1451 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1452 "(%d) doesn't match the number of hardware can "
1453 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1457 for (i = 0; i < reta_size; i++) {
1458 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1459 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1460 if (reta_conf[idx].mask & (1ULL << shift))
1461 reta_conf[idx].reta[shift] = vf->rss_lut[i];
1468 iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
1470 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1472 /* HENA setting, it is enabled by default, no change */
1473 if (!key || key_len == 0) {
1474 PMD_DRV_LOG(DEBUG, "No key to be configured");
1476 } else if (key_len != vf->vf_res->rss_key_size) {
1477 PMD_DRV_LOG(ERR, "The size of hash key configured "
1478 "(%d) doesn't match the size of hardware can "
1479 "support (%d)", key_len,
1480 vf->vf_res->rss_key_size);
1484 rte_memcpy(vf->rss_key, key, key_len);
1486 return iavf_configure_rss_key(adapter);
1490 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
1491 struct rte_eth_rss_conf *rss_conf)
1493 struct iavf_adapter *adapter =
1494 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1495 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1498 adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
1500 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1504 ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
1505 rss_conf->rss_key_len);
1509 if (rss_conf->rss_hf == 0) {
1511 ret = iavf_set_hena(adapter, 0);
1513 /* This is a workaround: temporarily allow an error to be returned,
1514 * since the PF may not handle hena = 0.
1517 PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
1521 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
1522 /* Clear existing RSS. */
1523 ret = iavf_set_hena(adapter, 0);
1525 /* This is a workaround: temporarily allow an error to be returned,
1526 * since the PF may not handle hena = 0.
1529 PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
1532 /* Set new RSS configuration. */
1533 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
1535 PMD_DRV_LOG(ERR, "fail to set new RSS");
1539 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
1546 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1547 struct rte_eth_rss_conf *rss_conf)
1549 struct iavf_adapter *adapter =
1550 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1551 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1553 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1556 rss_conf->rss_hf = vf->rss_hf;
1558 if (!rss_conf->rss_key)
1561 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
1562 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
1568 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1570 /* MTU setting is forbidden while the port is started */
1571 if (dev->data->dev_started) {
1572 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
1580 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1581 struct rte_ether_addr *mac_addr)
1583 struct iavf_adapter *adapter =
1584 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1585 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1586 struct rte_ether_addr *old_addr;
1589 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1591 if (rte_is_same_ether_addr(old_addr, mac_addr))
1594 ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
1596 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1597 RTE_ETHER_ADDR_PRT_FMT,
1598 RTE_ETHER_ADDR_BYTES(old_addr));
1600 ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
1602 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1603 RTE_ETHER_ADDR_PRT_FMT,
1604 RTE_ETHER_ADDR_BYTES(mac_addr));
1609 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
1614 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1616 if (*stat >= *offset)
1617 *stat = *stat - *offset;
1619 *stat = (uint64_t)((*stat +
1620 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1622 *stat &= IAVF_48_BIT_MASK;
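/*
 * Wrap-around example (illustrative): for a 48-bit counter with
 * *offset = 0xFFFFFFFFFFF0 that has wrapped to *stat = 0x10, the else
 * branch yields 0x10 + 2^48 - 0xFFFFFFFFFFF0 = 0x20, the true delta since
 * the offset was taken.
 */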
1626 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1628 if (*stat >= *offset)
1629 *stat = (uint64_t)(*stat - *offset);
1631 *stat = (uint64_t)((*stat +
1632 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
1636 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1638 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
1640 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1641 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1642 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1643 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1644 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1645 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1646 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1647 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1648 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1649 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1650 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1654 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1656 struct iavf_adapter *adapter =
1657 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1658 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1659 struct iavf_vsi *vsi = &vf->vsi;
1660 struct virtchnl_eth_stats *pstats = NULL;
1663 ret = iavf_query_stats(adapter, &pstats);
1665 uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
1666 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
1668 iavf_update_stats(vsi, pstats);
1669 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1670 pstats->rx_broadcast - pstats->rx_discards;
1671 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1673 stats->imissed = pstats->rx_discards;
1674 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1675 stats->ibytes = pstats->rx_bytes;
1676 stats->ibytes -= stats->ipackets * crc_stats_len;
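/* crc_stats_len is 0 when KEEP_CRC is enabled; otherwise the per-packet
 * CRC bytes (presumably RTE_ETHER_CRC_LEN, elided above) counted by the
 * hardware are subtracted here.
 */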
1677 stats->obytes = pstats->tx_bytes;
1679 PMD_DRV_LOG(ERR, "Get statistics failed");
1685 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1688 struct iavf_adapter *adapter =
1689 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1690 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1691 struct iavf_vsi *vsi = &vf->vsi;
1692 struct virtchnl_eth_stats *pstats = NULL;
1694 /* read stat values to clear hardware registers */
1695 ret = iavf_query_stats(adapter, &pstats);
1699 /* set the stats offset based on the current values */
1700 vsi->eth_stats_offset.eth_stats = *pstats;
1706 iavf_dev_xstats_reset(struct rte_eth_dev *dev)
1708 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1709 iavf_dev_stats_reset(dev);
1710 memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
1711 sizeof(struct iavf_ipsec_crypto_stats));
1715 static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1716 struct rte_eth_xstat_name *xstats_names,
1717 __rte_unused unsigned int limit)
1721 if (xstats_names != NULL)
1722 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1723 snprintf(xstats_names[i].name,
1724 sizeof(xstats_names[i].name),
1725 "%s", rte_iavf_stats_strings[i].name);
1727 return IAVF_NB_XSTATS;
1731 iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
1732 struct iavf_ipsec_crypto_stats *ips)
1735 for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
1736 struct iavf_rx_queue *rxq;
1737 struct iavf_ipsec_crypto_stats *stats;
1738 rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
1739 stats = &rxq->stats.ipsec_crypto;
1740 ips->icount += stats->icount;
1741 ips->ibytes += stats->ibytes;
1742 ips->ierrors.count += stats->ierrors.count;
1743 ips->ierrors.sad_miss += stats->ierrors.sad_miss;
1744 ips->ierrors.not_processed += stats->ierrors.not_processed;
1745 ips->ierrors.icv_check += stats->ierrors.icv_check;
1746 ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
1747 ips->ierrors.misc += stats->ierrors.misc;
1751 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
1752 struct rte_eth_xstat *xstats, unsigned int n)
1756 struct iavf_adapter *adapter =
1757 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1758 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1759 struct iavf_vsi *vsi = &vf->vsi;
1760 struct virtchnl_eth_stats *pstats = NULL;
1761 struct iavf_eth_xstats iavf_xtats = {{0}};
1763 if (n < IAVF_NB_XSTATS)
1764 return IAVF_NB_XSTATS;
1766 ret = iavf_query_stats(adapter, &pstats);
1773 iavf_update_stats(vsi, pstats);
1774 iavf_xtats.eth_stats = *pstats;
1776 if (iavf_ipsec_crypto_supported(adapter))
1777 iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
1779 /* loop over xstats array and values from pstats */
1780 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1782 xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
1783 rte_iavf_stats_strings[i].offset);
1786 return IAVF_NB_XSTATS;
1791 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1793 struct iavf_adapter *adapter =
1794 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1795 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1796 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1797 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1800 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1802 if (msix_intr == IAVF_MISC_VEC_ID) {
1803 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1804 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1805 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1806 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1807 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1810 IAVF_VFINT_DYN_CTLN1
1811 (msix_intr - IAVF_RX_VEC_START),
1812 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1813 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1814 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1817 IAVF_WRITE_FLUSH(hw);
1819 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1820 rte_intr_ack(pci_dev->intr_handle);
1826 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1828 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1829 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1832 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1834 if (msix_intr == IAVF_MISC_VEC_ID) {
1835 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1840 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1843 IAVF_WRITE_FLUSH(hw);
1848 iavf_check_vf_reset_done(struct iavf_hw *hw)
1852 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1853 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1854 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1855 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1856 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1857 reset == VIRTCHNL_VFR_COMPLETED)
1862 if (i >= IAVF_RESET_WAIT_CNT)
1869 iavf_lookup_proto_xtr_type(const char *flex_name)
1873 enum iavf_proto_xtr_type type;
1874 } xtr_type_map[] = {
1875 { "vlan", IAVF_PROTO_XTR_VLAN },
1876 { "ipv4", IAVF_PROTO_XTR_IPV4 },
1877 { "ipv6", IAVF_PROTO_XTR_IPV6 },
1878 { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
1879 { "tcp", IAVF_PROTO_XTR_TCP },
1880 { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
1881 { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
1885 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
1886 if (strcmp(flex_name, xtr_type_map[i].name) == 0)
1887 return xtr_type_map[i].type;
1890 PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
1891 "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
1897 * Parse an elem; an elem can be a single number, a single range, or a '(' ')' group:
1898 * 1) A single number elem is just a digit, e.g. 9
1899 * 2) A single range elem is two digits separated by '-', e.g. 2-6
1900 * 3) A group elem combines multiple 1) or 2) within '( )', e.g. (0,2-4,6)
1901 * Within a group elem, '-' is the range separator and
1902 * ',' separates single numbers.
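 * Example (illustrative): the queue set "(0,2-4,6)" with type tcp marks
 * queues 0, 2, 3, 4 and 6 for TCP extraction.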
1905 iavf_parse_queue_set(const char *input, int xtr_type,
1906 struct iavf_devargs *devargs)
1908 const char *str = input;
1913 while (isblank(*str))
1916 if (!isdigit(*str) && *str != '(')
1919 /* process single number or single range of number */
1922 idx = strtoul(str, &end, 10);
1923 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1926 while (isblank(*end))
1932 /* process single <number>-<number> */
1935 while (isblank(*end))
1941 idx = strtoul(end, &end, 10);
1942 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1946 while (isblank(*end))
1953 for (idx = RTE_MIN(min, max);
1954 idx <= RTE_MAX(min, max); idx++)
1955 devargs->proto_xtr[idx] = xtr_type;
1960 /* process set within bracket */
1962 while (isblank(*str))
1967 min = IAVF_MAX_QUEUE_NUM;
1969 /* advance to the first digit */
1970 while (isblank(*str))
1975 /* get the digit value */
1977 idx = strtoul(str, &end, 10);
1978 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1981 /* advance to a separator: '-', ',' or ')' */
1982 while (isblank(*end))
1985 if (min == IAVF_MAX_QUEUE_NUM)
1987 else /* reject consecutive '-' */
1989 } else if (*end == ',' || *end == ')') {
1991 if (min == IAVF_MAX_QUEUE_NUM)
1994 for (idx = RTE_MIN(min, max);
1995 idx <= RTE_MAX(min, max); idx++)
1996 devargs->proto_xtr[idx] = xtr_type;
1998 min = IAVF_MAX_QUEUE_NUM;
2004 } while (*end != ')' && *end != '\0');
2010 iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
2012 const char *queue_start;
2017 while (isblank(*queues))
2020 if (*queues != '[') {
2021 xtr_type = iavf_lookup_proto_xtr_type(queues);
2025 devargs->proto_xtr_dflt = xtr_type;
2032 while (isblank(*queues))
2034 if (*queues == '\0')
2037 queue_start = queues;
2039 /* skip over a complete parenthesized group */
2040 if (*queue_start == '(') {
2041 queues += strcspn(queues, ")");
2046 /* scan the separator ':' */
2047 queues += strcspn(queues, ":");
2048 if (*queues++ != ':')
2050 while (isblank(*queues))
2053 for (idx = 0; ; idx++) {
2054 if (isblank(queues[idx]) ||
2055 queues[idx] == ',' ||
2056 queues[idx] == ']' ||
2057 queues[idx] == '\0')
2060 if (idx > sizeof(flex_name) - 2)
2063 flex_name[idx] = queues[idx];
2065 flex_name[idx] = '\0';
2066 xtr_type = iavf_lookup_proto_xtr_type(flex_name);
2072 while (isblank(*queues) || *queues == ',' || *queues == ']')
2075 if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
2077 } while (*queues != '\0');
2083 iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
2086 struct iavf_devargs *devargs = extra_args;
2088 if (!value || !extra_args)
2091 if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
2092 PMD_DRV_LOG(ERR, "the proto_xtr's parameter is wrong : '%s'",
2101 parse_u16(__rte_unused const char *key, const char *value, void *args)
2103 u16 *num = (u16 *)args;
2107 tmp = strtoull(value, NULL, 10);
2108 if (errno || !tmp) {
2109 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
2119 static int iavf_parse_devargs(struct rte_eth_dev *dev)
2121 struct iavf_adapter *ad =
2122 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2123 struct rte_devargs *devargs = dev->device->devargs;
2124 struct rte_kvargs *kvlist;
2130 kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
2132 PMD_INIT_LOG(ERR, "invalid kvargs key\n");
2136 ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
2137 memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
2138 sizeof(ad->devargs.proto_xtr));
2140 ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
2141 &iavf_handle_proto_xtr_arg, &ad->devargs);
2145 ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
2146 &parse_u16, &ad->devargs.quanta_size);
2150 if (ad->devargs.quanta_size == 0)
2151 ad->devargs.quanta_size = 1024;
2153 if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
2154 ad->devargs.quanta_size & 0x40) {
2155 PMD_INIT_LOG(ERR, "invalid quanta size\n");
2160 rte_kvargs_free(kvlist);
2165 iavf_init_proto_xtr(struct rte_eth_dev *dev)
2167 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2168 struct iavf_adapter *ad =
2169 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2170 const struct iavf_proto_xtr_ol *xtr_ol;
2171 bool proto_xtr_enable = false;
2175 vf->proto_xtr = rte_zmalloc("vf proto xtr",
2176 vf->vsi_res->num_queue_pairs, 0);
2177 if (unlikely(!(vf->proto_xtr))) {
2178 PMD_DRV_LOG(ERR, "no memory for setting up proto_xtr's table");
2182 for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
2183 vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
2184 IAVF_PROTO_XTR_NONE ?
2185 ad->devargs.proto_xtr[i] :
2186 ad->devargs.proto_xtr_dflt;
2188 if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
2189 uint8_t type = vf->proto_xtr[i];
2191 iavf_proto_xtr_params[type].required = true;
2192 proto_xtr_enable = true;
2196 if (likely(!proto_xtr_enable))
2199 offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
2200 if (unlikely(offset == -1)) {
2202 "failed to extract protocol metadata, error %d",
2208 "proto_xtr metadata offset in mbuf is : %d",
2210 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;
2212 for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
2213 xtr_ol = &iavf_proto_xtr_params[i];
2215 uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);
2217 if (!xtr_ol->required)
2220 if (!(vf->supported_rxdid & BIT(rxdid))) {
2222 "rxdid[%u] is not supported in hardware",
2224 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2228 offset = rte_mbuf_dynflag_register(&xtr_ol->param);
2229 if (unlikely(offset == -1)) {
2231 "failed to register proto_xtr offload '%s', error %d",
2232 xtr_ol->param.name, -rte_errno);
2234 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2239 "proto_xtr offload '%s' offset in mbuf is : %d",
2240 xtr_ol->param.name, offset);
2241 *xtr_ol->ol_flag = 1ULL << offset;
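/*
 * Consumption sketch (application side, illustrative): after Rx, test
 * mbuf->ol_flags against e.g. rte_pmd_ifd_dynflag_proto_xtr_vlan_mask
 * and, if set, read the extracted metadata from the dynfield at
 * rte_pmd_ifd_dynfield_proto_xtr_metadata_offs.
 */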
static int
iavf_init_vf(struct rte_eth_dev *dev)
{
	int err, bufsz;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = iavf_parse_devargs(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		goto err;
	}

	err = iavf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = iavf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	iavf_init_adminq_parameter(hw);
	err = iavf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}

	if (iavf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (iavf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
		goto err_alloc;
	}

	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}

	if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT)
		vf->lv_enabled = true;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
		if (iavf_get_supported_rxdid(adapter) != 0) {
			PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
			goto err_rss;
		}
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
			PMD_INIT_LOG(ERR, "failed to do get VLAN offload v2 capabilities");
			goto err_rss;
		}
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
		bufsz = sizeof(struct virtchnl_qos_cap_list) +
			IAVF_MAX_TRAFFIC_CLASS *
			sizeof(struct virtchnl_qos_cap_elem);
		vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
		if (!vf->qos_cap) {
			PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
			goto err_rss;
		}
		iavf_tm_conf_init(dev);
	}

	iavf_init_proto_xtr(dev);

	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->qos_cap);
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	iavf_shutdown_adminq(hw);
err:
	return -1;
}
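/*
 * The error labels above unwind strictly in reverse order of setup: RSS
 * buffers first, then the VF resource and admin-queue response buffers,
 * and finally the admin queue itself, so a failure at any step frees
 * exactly what had been allocated up to that point.
 */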
static void
iavf_uninit_vf(struct rte_eth_dev *dev)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	iavf_shutdown_adminq(hw);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(vf->qos_cap);
	vf->qos_cap = NULL;

	rte_free(vf->rss_lut);
	vf->rss_lut = NULL;
	rte_free(vf->rss_key);
	vf->rss_key = NULL;
}
/* Enable default admin queue interrupt setting */
static inline void
iavf_enable_irq0(struct iavf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(hw);
}

static inline void
iavf_disable_irq0(struct iavf_hw *hw)
{
	/* Disable all interrupt types */
	IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	IAVF_WRITE_FLUSH(hw);
}
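/*
 * Both helpers end with IAVF_WRITE_FLUSH() so the posted MMIO writes are
 * pushed to the device before the caller continues; otherwise the new
 * enable/disable state could still be in flight when the next admin-queue
 * operation starts.
 */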
static void
iavf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	iavf_disable_irq0(hw);

	iavf_handle_virtchnl_msg(dev);

	iavf_enable_irq0(hw);
}
static void
iavf_dev_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t icr0;

	iavf_disable_irq0(hw);

	/* read out interrupt causes */
	icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);

	if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
		iavf_handle_virtchnl_msg(dev);
	}

	iavf_enable_irq0(hw);

	rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
			  iavf_dev_alarm_handler, dev);
}
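/*
 * The alarm handler re-arms itself every IAVF_ALARM_INTERVAL and polls
 * ICR01 for pending admin-queue events. iavf_dev_init() uses this polled
 * path only when the PF does not offer VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
 * otherwise the MSI-X driven iavf_dev_interrupt_handler() above is
 * registered instead.
 */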
static int
iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
		      const struct rte_flow_ops **ops)
{
	if (!dev)
		return -EINVAL;

	*ops = &iavf_flow_ops;
	return 0;
}
static void
iavf_default_rss_disable(struct iavf_adapter *adapter)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int ret = 0;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		/* Set hena = 0 to ask PF to cleanup all existing RSS. */
		ret = iavf_set_hena(adapter, 0);
		if (ret)
			/* It is a workaround, temporarily allow error to be
			 * returned due to possible lack of PF handling for
			 * hena = 0.
			 */
			PMD_INIT_LOG(WARNING,
				     "failed to disable default RSS: possible lack of PF support");
	}
}
static int
iavf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &iavf_eth_dev_ops;
	eth_dev->rx_queue_count = iavf_dev_rxq_count;
	eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
	eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
	eth_dev->rx_pkt_burst = &iavf_recv_pkts;
	eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &iavf_prep_pkts;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check if we need a different RX
	 * and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		iavf_set_rx_function(eth_dev);
		iavf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->dev_data = eth_dev->data;
	adapter->stopped = 1;

	if (iavf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* set default ptype table */
	iavf_set_default_ptype_table(eth_dev);

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
		"iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
		ret = -ENOMEM;
		goto init_vf_err;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!rte_is_valid_assigned_ether_addr(
			(struct rte_ether_addr *)hw->mac.addr))
		rte_eth_random_addr(hw->mac.addr);
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* register callback func to eal lib */
		rte_intr_callback_register(pci_dev->intr_handle,
					   iavf_dev_interrupt_handler,
					   (void *)eth_dev);

		/* enable uio intr after callback register */
		rte_intr_enable(pci_dev->intr_handle);
	} else {
		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
				  iavf_dev_alarm_handler, eth_dev);
	}

	/* configure and enable device interrupt */
	iavf_enable_irq0(hw);

	ret = iavf_flow_init(adapter);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize flow");
		goto flow_init_err;
	}

	/* Check if the IPsec Crypto offload is supported and create
	 * security_ctx if it is.
	 */
	if (iavf_ipsec_crypto_supported(adapter)) {
		/* Initialize security_ctx only for primary process */
		ret = iavf_security_ctx_create(adapter);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
			return ret;
		}

		ret = iavf_security_init(adapter);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
			return ret;
		}
	}

	iavf_default_rss_disable(adapter);

	/* Start device watchdog */
	iavf_dev_watchdog_enable(adapter);

	return 0;

flow_init_err:
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

init_vf_err:
	iavf_uninit_vf(eth_dev);

	return ret;
}
static int
iavf_dev_close(struct rte_eth_dev *dev)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = iavf_dev_stop(dev);

	iavf_flow_flush(dev, NULL);
	iavf_flow_uninit(adapter);

	/* Disable promiscuous mode before resetting the VF. This is a
	 * workaround needed when working with the kernel PF driver, not
	 * part of the normal teardown path.
	 */
	if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
		iavf_config_promisc(adapter, false, false);

	iavf_shutdown_adminq(hw);
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* disable uio intr before callback unregister */
		rte_intr_disable(intr_handle);

		/* unregister callback func from eal lib */
		rte_intr_callback_unregister(intr_handle,
					     iavf_dev_interrupt_handler, dev);
	} else {
		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
	}
	iavf_disable_irq0(hw);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
		iavf_tm_conf_uninit(dev);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		rte_free(vf->rss_lut);
		vf->rss_lut = NULL;
		rte_free(vf->rss_key);
		vf->rss_key = NULL;
	}

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	/* If the VF is reset via VFLR, the device will be knocked out of bus
	 * master mode, and the driver will fail to recover from the reset. Fix
	 * this by enabling bus mastering after every reset. In a non-VFLR case,
	 * the bus master bit will not be disabled, and this call will have no
	 * effect.
	 */
	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
		vf->vf_reset = false;

	/* disable watchdog */
	iavf_dev_watchdog_disable(adapter);

	return ret;
}
static int
iavf_dev_uninit(struct rte_eth_dev *dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	iavf_dev_close(dev);

	return 0;
}
/*
 * Reset VF device only to re-initialize resources in PMD layer
 */
static int
iavf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = iavf_dev_uninit(dev);
	if (ret)
		return ret;

	return iavf_dev_init(dev);
}
static int
iavf_dcf_cap_check_handler(__rte_unused const char *key,
			   const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "dcf"))
		return -1;
	return 0;
}

static int
iavf_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       iavf_dcf_cap_check_handler, NULL) < 0)
		goto exit;
	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
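/*
 * Probe-time sketch (hypothetical PCI address): when the device is
 * allow-listed as "-a 0000:18:01.0,cap=dcf", iavf_dcf_cap_selected()
 * returns 1 and eth_iavf_pci_probe() below returns a positive value,
 * declining the device so a DCF-capable driver registered for the same
 * PCI IDs can claim it instead.
 */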
static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			      struct rte_pci_device *pci_dev)
{
	if (iavf_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct iavf_adapter),
					     iavf_dev_init);
}

static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_iavf_pmd = {
	.id_table = pci_id_iavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_iavf_pci_probe,
	.remove = eth_iavf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);
#endif