1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
19 #include <rte_alarm.h>
20 #include <rte_atomic.h>
22 #include <rte_ether.h>
23 #include <ethdev_driver.h>
24 #include <ethdev_pci.h>
25 #include <rte_malloc.h>
26 #include <rte_memzone.h>
30 #include "iavf_rxtx.h"
31 #include "iavf_generic_flow.h"
32 #include "rte_pmd_iavf.h"
33 #include "iavf_ipsec_crypto.h"
36 #define IAVF_PROTO_XTR_ARG "proto_xtr"
37 #define IAVF_QUANTA_SIZE_ARG "quanta_size"
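/*
 * Illustrative devargs usage (a sketch; the PCI address is hypothetical):
 *
 *	dpdk-testpmd -a 18:01.0,proto_xtr='[(0,2-3):tcp,4:vlan]' ...
 *	dpdk-testpmd -a 18:01.0,quanta_size=1024 ...
 *
 * The first selects TCP field extraction on queues 0 and 2-3 and VLAN
 * extraction on queue 4; the second sets the quanta size, which
 * iavf_parse_devargs() below validates.
 */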
39 uint64_t iavf_timestamp_dynflag;
40 int iavf_timestamp_dynfield_offset = -1;
42 static const char * const iavf_valid_args[] = {
48 static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
49 .name = "intel_pmd_dynfield_proto_xtr_metadata",
50 .size = sizeof(uint32_t),
51 .align = __alignof__(uint32_t),
55 struct iavf_proto_xtr_ol {
56 const struct rte_mbuf_dynflag param;
61 static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
62 [IAVF_PROTO_XTR_VLAN] = {
63 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
64 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
65 [IAVF_PROTO_XTR_IPV4] = {
66 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
67 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
68 [IAVF_PROTO_XTR_IPV6] = {
69 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
70 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
71 [IAVF_PROTO_XTR_IPV6_FLOW] = {
72 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
73 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
74 [IAVF_PROTO_XTR_TCP] = {
75 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
76 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
77 [IAVF_PROTO_XTR_IP_OFFSET] = {
78 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
79 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
80 [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
82 .name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
84 &rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
87 static int iavf_dev_configure(struct rte_eth_dev *dev);
88 static int iavf_dev_start(struct rte_eth_dev *dev);
89 static int iavf_dev_stop(struct rte_eth_dev *dev);
90 static int iavf_dev_close(struct rte_eth_dev *dev);
91 static int iavf_dev_reset(struct rte_eth_dev *dev);
92 static int iavf_dev_info_get(struct rte_eth_dev *dev,
93 struct rte_eth_dev_info *dev_info);
94 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
95 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
96 struct rte_eth_stats *stats);
97 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
98 static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
99 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
100 struct rte_eth_xstat *xstats, unsigned int n);
101 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
102 struct rte_eth_xstat_name *xstats_names,
104 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
105 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
106 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
107 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
108 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
109 struct rte_ether_addr *addr,
112 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
113 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
114 uint16_t vlan_id, int on);
115 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
116 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
117 struct rte_eth_rss_reta_entry64 *reta_conf,
119 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
120 struct rte_eth_rss_reta_entry64 *reta_conf,
122 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
123 struct rte_eth_rss_conf *rss_conf);
124 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
125 struct rte_eth_rss_conf *rss_conf);
126 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
127 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
128 struct rte_ether_addr *mac_addr);
129 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
131 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
133 static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
134 const struct rte_flow_ops **ops);
135 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
136 struct rte_ether_addr *mc_addrs,
137 uint32_t mc_addrs_num);
138 static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
140 static const struct rte_pci_id pci_id_iavf_map[] = {
141 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
142 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
143 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
144 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
145 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
146 { .vendor_id = 0, /* sentinel */ },
149 struct rte_iavf_xstats_name_off {
150 char name[RTE_ETH_XSTATS_NAME_SIZE];
154 #define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
155 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
156 {"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
157 {"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
158 {"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
159 {"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
160 {"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
161 {"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
162 rx_unknown_protocol)},
163 {"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
164 {"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
165 {"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
166 {"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
167 {"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
168 {"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
170 {"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
171 {"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
172 {"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
173 {"inline_ipsec_crypto_ierrors_sad_lookup",
174 _OFF_OF(ips_stats.ierrors.sad_miss)},
175 {"inline_ipsec_crypto_ierrors_not_processed",
176 _OFF_OF(ips_stats.ierrors.not_processed)},
177 {"inline_ipsec_crypto_ierrors_icv_fail",
178 _OFF_OF(ips_stats.ierrors.icv_check)},
179 {"inline_ipsec_crypto_ierrors_length",
180 _OFF_OF(ips_stats.ierrors.ipsec_length)},
181 {"inline_ipsec_crypto_ierrors_misc",
182 _OFF_OF(ips_stats.ierrors.misc)},
186 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
187 sizeof(rte_iavf_stats_strings[0]))
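/*
 * App-side sketch (illustrative, not driver code): these counters are
 * read back through the generic xstats API; "port_id" is assumed to be
 * a configured iavf port, and handle_xstats() is a hypothetical
 * consumer. The returned values follow the order of
 * rte_iavf_stats_strings[].
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n)
 *		handle_xstats(xs, n);
 */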
189 static const struct eth_dev_ops iavf_eth_dev_ops = {
190 .dev_configure = iavf_dev_configure,
191 .dev_start = iavf_dev_start,
192 .dev_stop = iavf_dev_stop,
193 .dev_close = iavf_dev_close,
194 .dev_reset = iavf_dev_reset,
195 .dev_infos_get = iavf_dev_info_get,
196 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
197 .link_update = iavf_dev_link_update,
198 .stats_get = iavf_dev_stats_get,
199 .stats_reset = iavf_dev_stats_reset,
200 .xstats_get = iavf_dev_xstats_get,
201 .xstats_get_names = iavf_dev_xstats_get_names,
202 .xstats_reset = iavf_dev_xstats_reset,
203 .promiscuous_enable = iavf_dev_promiscuous_enable,
204 .promiscuous_disable = iavf_dev_promiscuous_disable,
205 .allmulticast_enable = iavf_dev_allmulticast_enable,
206 .allmulticast_disable = iavf_dev_allmulticast_disable,
207 .mac_addr_add = iavf_dev_add_mac_addr,
208 .mac_addr_remove = iavf_dev_del_mac_addr,
209 .set_mc_addr_list = iavf_set_mc_addr_list,
210 .vlan_filter_set = iavf_dev_vlan_filter_set,
211 .vlan_offload_set = iavf_dev_vlan_offload_set,
212 .rx_queue_start = iavf_dev_rx_queue_start,
213 .rx_queue_stop = iavf_dev_rx_queue_stop,
214 .tx_queue_start = iavf_dev_tx_queue_start,
215 .tx_queue_stop = iavf_dev_tx_queue_stop,
216 .rx_queue_setup = iavf_dev_rx_queue_setup,
217 .rx_queue_release = iavf_dev_rx_queue_release,
218 .tx_queue_setup = iavf_dev_tx_queue_setup,
219 .tx_queue_release = iavf_dev_tx_queue_release,
220 .mac_addr_set = iavf_dev_set_default_mac_addr,
221 .reta_update = iavf_dev_rss_reta_update,
222 .reta_query = iavf_dev_rss_reta_query,
223 .rss_hash_update = iavf_dev_rss_hash_update,
224 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
225 .rxq_info_get = iavf_dev_rxq_info_get,
226 .txq_info_get = iavf_dev_txq_info_get,
227 .mtu_set = iavf_dev_mtu_set,
228 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
229 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
230 .flow_ops_get = iavf_dev_flow_ops_get,
231 .tx_done_cleanup = iavf_dev_tx_done_cleanup,
232 .get_monitor_addr = iavf_get_monitor_addr,
233 .tm_ops_get = iavf_tm_ops_get,
237 iavf_tm_ops_get(struct rte_eth_dev *dev,
240 struct iavf_adapter *adapter =
241 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
249 *(const void **)arg = &iavf_tm_ops;
256 iavf_vfr_inprogress(struct iavf_hw *hw)
260 if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
261 IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
262 VIRTCHNL_VFR_INPROGRESS)
266 PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
273 iavf_dev_watchdog(void *cb_arg)
275 struct iavf_adapter *adapter = cb_arg;
276 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
277 int vfr_inprogress = 0, rc = 0;
279 /* check if watchdog has been disabled since last call */
280 if (!adapter->vf.watchdog_enabled)
283 /* If in reset then poll vfr_inprogress register for completion */
284 if (adapter->vf.vf_reset) {
285 vfr_inprogress = iavf_vfr_inprogress(hw);
287 if (!vfr_inprogress) {
288 PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
289 adapter->vf.eth_dev->data->name);
290 adapter->vf.vf_reset = false;
292 /* If not in reset then poll vfr_inprogress register for VFLR event */
294 vfr_inprogress = iavf_vfr_inprogress(hw);
296 if (vfr_inprogress) {
298 "VF \"%s\" reset event detected by watchdog",
299 adapter->vf.eth_dev->data->name);
301 /* enter reset state with VFLR event */
302 adapter->vf.vf_reset = true;
304 rte_eth_dev_callback_process(adapter->vf.eth_dev,
305 RTE_ETH_EVENT_INTR_RESET, NULL);
309 /* re-arm the watchdog */
310 rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
311 &iavf_dev_watchdog, cb_arg);
314 PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
315 adapter->vf.eth_dev->data->name);
319 iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
321 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
322 PMD_DRV_LOG(INFO, "Enabling device watchdog");
323 adapter->vf.watchdog_enabled = true;
324 if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
325 &iavf_dev_watchdog, (void *)adapter))
326 PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
331 iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
333 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
334 PMD_DRV_LOG(INFO, "Disabling device watchdog");
335 adapter->vf.watchdog_enabled = false;
340 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
341 struct rte_ether_addr *mc_addrs,
342 uint32_t mc_addrs_num)
344 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
345 struct iavf_adapter *adapter =
346 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
349 if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
351 "can't add more than a limited number (%u) of addresses.",
352 (uint32_t)IAVF_NUM_MACADDR_MAX);
359 /* flush previous addresses */
360 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
366 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
369 /* if adding the new MAC address list fails, restore the previous
372 ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
373 vf->mc_addrs_num, true);
377 vf->mc_addrs_num = mc_addrs_num;
379 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
386 iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
388 static const uint64_t map_hena_rss[] = {
390 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
391 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
392 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
393 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
394 [IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
395 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
396 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
397 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
398 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
399 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
400 [IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
401 RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
402 [IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
403 RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
404 [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
407 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
408 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
409 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
410 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
411 [IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
412 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
413 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
414 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
415 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
416 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
417 [IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
418 RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
419 [IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
420 RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
421 [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
424 [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
427 const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
428 RTE_ETH_RSS_NONFRAG_IPV4_TCP |
429 RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
430 RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
431 RTE_ETH_RSS_FRAG_IPV4;
433 const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
434 RTE_ETH_RSS_NONFRAG_IPV6_TCP |
435 RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
436 RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
437 RTE_ETH_RSS_FRAG_IPV6;
439 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
440 uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
444 ret = iavf_get_hena_caps(adapter, &caps);
447 * RSS offload type configuration is not a mandatory feature
448 * for the VF, so just log a warning and return.
451 "failed to get RSS offload type caps, ret: %d", ret);
456 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered
457 * generalizations of all the other IPv4 and IPv6 RSS types.
459 if (rss_hf & RTE_ETH_RSS_IPV4)
462 if (rss_hf & RTE_ETH_RSS_IPV6)
465 RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
467 for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
468 uint64_t bit = BIT_ULL(i);
470 if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
471 valid_rss_hf |= map_hena_rss[i];
476 ret = iavf_set_hena(adapter, hena);
479 * RSS offload type configuration is not a mandatory feature
480 * for the VF, so just log a warning and return.
483 "failed to set RSS offload types, ret: %d", ret);
487 if (valid_rss_hf & ipv4_rss)
488 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
490 if (valid_rss_hf & ipv6_rss)
491 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
493 if (rss_hf & ~valid_rss_hf)
494 PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
495 rss_hf & ~valid_rss_hf);
497 vf->rss_hf = valid_rss_hf;
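/*
 * Worked example for iavf_config_rss_hf() above (illustrative): with
 * rss_hf == RTE_ETH_RSS_IPV4 the request is first generalized,
 *
 *	rss_hf |= ipv4_rss;
 *
 * then every PCTYPE bit present in "caps" whose map_hena_rss[] entry
 * intersects rss_hf is set in "hena", e.g.
 *
 *	hena |= BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP);
 *
 * and RTE_ETH_RSS_IPV4 is folded back into valid_rss_hf because at
 * least one IPv4 type was accepted.
 */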
501 iavf_init_rss(struct iavf_adapter *adapter)
503 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
504 struct rte_eth_rss_conf *rss_conf;
508 rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
509 nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
510 vf->max_rss_qregion);
512 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
513 PMD_DRV_LOG(DEBUG, "RSS is not supported");
517 /* configure RSS key */
518 if (!rss_conf->rss_key) {
519 /* Generate a random default hash key */
520 for (i = 0; i < vf->vf_res->rss_key_size; i++)
521 vf->rss_key[i] = (uint8_t)rte_rand();
523 rte_memcpy(vf->rss_key, rss_conf->rss_key,
524 RTE_MIN(rss_conf->rss_key_len,
525 vf->vf_res->rss_key_size));
527 /* init RSS LUT */
528 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
533 /* send virtchnl ops to configure RSS */
534 ret = iavf_configure_rss_lut(adapter);
537 ret = iavf_configure_rss_key(adapter);
541 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
542 /* Set RSS hash configuration based on rss_conf->rss_hf. */
543 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
545 PMD_DRV_LOG(ERR, "fail to set default RSS");
549 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
556 iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
558 struct iavf_adapter *ad =
559 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
560 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
563 ret = iavf_request_queues(dev, num);
565 PMD_DRV_LOG(ERR, "request queues from PF failed");
568 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
569 vf->vsi_res->num_queue_pairs, num);
571 ret = iavf_dev_reset(dev);
573 PMD_DRV_LOG(ERR, "vf reset failed");
581 iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
583 struct iavf_adapter *adapter =
584 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
585 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
588 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
591 enable = !!(dev->data->dev_conf.txmode.offloads &
592 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
593 iavf_config_vlan_insert_v2(adapter, enable);
599 iavf_dev_init_vlan(struct rte_eth_dev *dev)
603 err = iavf_dev_vlan_offload_set(dev,
604 RTE_ETH_VLAN_STRIP_MASK |
605 RTE_ETH_QINQ_STRIP_MASK |
606 RTE_ETH_VLAN_FILTER_MASK |
607 RTE_ETH_VLAN_EXTEND_MASK);
609 PMD_DRV_LOG(ERR, "Failed to update vlan offload");
613 err = iavf_dev_vlan_insert_set(dev);
615 PMD_DRV_LOG(ERR, "Failed to update vlan insertion");
621 iavf_dev_configure(struct rte_eth_dev *dev)
623 struct iavf_adapter *ad =
624 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
625 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
626 uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
627 dev->data->nb_tx_queues);
633 ad->rx_bulk_alloc_allowed = true;
634 /* Initialize to TRUE. If any Rx queue doesn't meet the
635 * vector Rx/Tx preconditions, it will be reset.
637 ad->rx_vec_allowed = true;
638 ad->tx_vec_allowed = true;
640 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
641 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
643 /* Large VF setting */
644 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
645 if (!(vf->vf_res->vf_cap_flags &
646 VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
647 PMD_DRV_LOG(ERR, "large VF is not supported");
651 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
652 PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
653 IAVF_MAX_NUM_QUEUES_LV);
657 ret = iavf_queues_req_reset(dev, num_queue_pairs);
661 ret = iavf_get_max_rss_queue_region(ad);
663 PMD_INIT_LOG(ERR, "get max rss queue region failed");
667 vf->lv_enabled = true;
669 /* If large VF is already enabled, disable it and release the
670 * redundant queue resources.
671 * Otherwise, check for enough queue pairs; if not, request them from PF.
673 if (vf->lv_enabled ||
674 num_queue_pairs > vf->vsi_res->num_queue_pairs) {
675 ret = iavf_queues_req_reset(dev, num_queue_pairs);
679 vf->lv_enabled = false;
681 /* if large VF is not required, use the default RSS queue region */
682 vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
685 ret = iavf_dev_init_vlan(dev);
687 PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
689 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
690 if (iavf_init_rss(ad) != 0) {
691 PMD_DRV_LOG(ERR, "configure rss failed");
699 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
701 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
702 struct rte_eth_dev_data *dev_data = dev->data;
703 uint16_t buf_size, max_pkt_len;
704 uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
705 enum iavf_status err;
707 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
709 /* Calculate the maximum packet length allowed */
710 max_pkt_len = RTE_MIN((uint32_t)
711 rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
714 /* Check if maximum packet length is set correctly. */
715 if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
716 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
717 PMD_DRV_LOG(ERR, "maximum packet length must be "
718 "larger than %u and smaller than %u",
719 (uint32_t)IAVF_ETH_MAX_LEN,
720 (uint32_t)IAVF_FRAME_SIZE_MAX);
724 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
725 /* Register mbuf field and flag for Rx timestamp */
726 err = rte_mbuf_dyn_rx_timestamp_register(
727 &iavf_timestamp_dynfield_offset,
728 &iavf_timestamp_dynflag);
731 "Cannot register mbuf field/flag for timestamp");
736 rxq->max_pkt_len = max_pkt_len;
737 if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
738 rxq->max_pkt_len > buf_size) {
739 dev_data->scattered_rx = 1;
741 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
742 IAVF_WRITE_FLUSH(hw);
748 iavf_init_queues(struct rte_eth_dev *dev)
750 struct iavf_rx_queue **rxq =
751 (struct iavf_rx_queue **)dev->data->rx_queues;
752 int i, ret = IAVF_SUCCESS;
754 for (i = 0; i < dev->data->nb_rx_queues; i++) {
755 if (!rxq[i] || !rxq[i]->q_set)
757 ret = iavf_init_rxq(dev, rxq[i]);
758 if (ret != IAVF_SUCCESS)
761 /* set Rx/Tx functions to vector/scatter/single-segment mode
762 * according to the parameters
764 iavf_set_rx_function(dev);
765 iavf_set_tx_function(dev);
770 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
771 struct rte_intr_handle *intr_handle)
773 struct iavf_adapter *adapter =
774 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
775 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
776 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
777 struct iavf_qv_map *qv_map;
778 uint16_t interval, i;
781 if (rte_intr_cap_multiple(intr_handle) &&
782 dev->data->dev_conf.intr_conf.rxq) {
783 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
787 if (rte_intr_dp_is_en(intr_handle)) {
788 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
789 dev->data->nb_rx_queues)) {
790 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
791 dev->data->nb_rx_queues);
797 qv_map = rte_zmalloc("qv_map",
798 dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
800 PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
801 dev->data->nb_rx_queues);
802 goto qv_map_alloc_err;
805 if (!dev->data->dev_conf.intr_conf.rxq ||
806 !rte_intr_dp_is_en(intr_handle)) {
807 /* Rx interrupt disabled, map interrupt only for write-back */
809 if (vf->vf_res->vf_cap_flags &
810 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
811 /* If WB_ON_ITR is supported, enable it */
812 vf->msix_base = IAVF_RX_VEC_START;
813 /* Set the ITR for index zero to 2us to make sure that
814 * we leave time for aggregation to occur, but don't
815 * increase latency dramatically.
818 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
819 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
820 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
821 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
822 /* debug - check for success! the return value
823 * should be 2, offset is 0x2800
825 /* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
827 /* If the WB_ON_ITR offload flag is not set, an
828 * interrupt is needed for descriptor write-back.
830 vf->msix_base = IAVF_MISC_VEC_ID;
832 /* set ITR to default */
833 interval = iavf_calc_itr_interval(
834 IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
835 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
836 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
837 (IAVF_ITR_INDEX_DEFAULT <<
838 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
840 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
842 IAVF_WRITE_FLUSH(hw);
843 /* map all queues to the same interrupt */
844 for (i = 0; i < dev->data->nb_rx_queues; i++) {
845 qv_map[i].queue_id = i;
846 qv_map[i].vector_id = vf->msix_base;
850 if (!rte_intr_allow_others(intr_handle)) {
852 vf->msix_base = IAVF_MISC_VEC_ID;
853 for (i = 0; i < dev->data->nb_rx_queues; i++) {
854 qv_map[i].queue_id = i;
855 qv_map[i].vector_id = vf->msix_base;
856 rte_intr_vec_list_index_set(intr_handle,
857 i, IAVF_MISC_VEC_ID);
861 "vector %u are mapping to all Rx queues",
864 /* If Rx interrupt is required, and we can use
865 * multiple interrupts, then the vectors start from 1
868 RTE_MIN(rte_intr_nb_efd_get(intr_handle),
869 (uint16_t)(vf->vf_res->max_vectors - 1));
870 vf->msix_base = IAVF_RX_VEC_START;
871 vec = IAVF_RX_VEC_START;
872 for (i = 0; i < dev->data->nb_rx_queues; i++) {
873 qv_map[i].queue_id = i;
874 qv_map[i].vector_id = vec;
875 rte_intr_vec_list_index_set(intr_handle,
877 if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
878 vec = IAVF_RX_VEC_START;
882 "%u vectors are mapping to %u Rx queues",
883 vf->nb_msix, dev->data->nb_rx_queues);
887 if (!vf->lv_enabled) {
888 if (iavf_config_irq_map(adapter)) {
889 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
890 goto config_irq_map_err;
893 uint16_t num_qv_maps = dev->data->nb_rx_queues;
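/*
 * Worked example (illustrative; assumes IAVF_IRQ_MAP_NUM_PER_BUF == 128):
 * with 300 Rx queues the queue-vector map is sent in three chunks so
 * that each adminq buffer stays within the limit:
 *
 *	iavf_config_irq_map_lv(adapter, 128, 0);
 *	iavf_config_irq_map_lv(adapter, 128, 128);
 *	iavf_config_irq_map_lv(adapter, 44, 256);
 */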
896 while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
897 if (iavf_config_irq_map_lv(adapter,
898 IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
899 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
900 goto config_irq_map_err;
902 num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
903 index += IAVF_IRQ_MAP_NUM_PER_BUF;
906 if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
907 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
908 goto config_irq_map_err;
914 rte_free(vf->qv_map);
918 rte_intr_vec_list_free(intr_handle);
924 iavf_start_queues(struct rte_eth_dev *dev)
926 struct iavf_rx_queue *rxq;
927 struct iavf_tx_queue *txq;
929 uint16_t nb_txq, nb_rxq;
931 for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
932 txq = dev->data->tx_queues[nb_txq];
933 if (txq->tx_deferred_start)
935 if (iavf_dev_tx_queue_start(dev, nb_txq) != 0) {
936 PMD_DRV_LOG(ERR, "Fail to start tx queue %u", nb_txq);
941 for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
942 rxq = dev->data->rx_queues[nb_rxq];
943 if (rxq->rx_deferred_start)
945 if (iavf_dev_rx_queue_start(dev, nb_rxq) != 0) {
946 PMD_DRV_LOG(ERR, "Fail to start rx queue %u", nb_rxq);
954 for (i = 0; i < nb_rxq; i++)
955 iavf_dev_rx_queue_stop(dev, i);
957 for (i = 0; i < nb_txq; i++)
958 iavf_dev_tx_queue_stop(dev, i);
964 iavf_dev_start(struct rte_eth_dev *dev)
966 struct iavf_adapter *adapter =
967 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
968 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
969 struct rte_intr_handle *intr_handle = dev->intr_handle;
970 uint16_t num_queue_pairs;
973 PMD_INIT_FUNC_TRACE();
978 adapter->stopped = 0;
980 vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
981 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
982 dev->data->nb_tx_queues);
983 num_queue_pairs = vf->num_queue_pairs;
985 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
986 if (iavf_get_qos_cap(adapter)) {
987 PMD_INIT_LOG(ERR, "Failed to get qos capability");
991 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
992 if (iavf_get_ptp_cap(adapter)) {
993 PMD_INIT_LOG(ERR, "Failed to get ptp capability");
998 if (iavf_init_queues(dev) != 0) {
999 PMD_DRV_LOG(ERR, "failed to do Queue init");
1003 if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
1004 PMD_DRV_LOG(WARNING, "configure quanta size failed");
1006 /* If needed, send configure queues msg multiple times to make the
1007 * adminq buffer length smaller than the 4K limitation.
1009 while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
1010 if (iavf_configure_queues(adapter,
1011 IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
1012 PMD_DRV_LOG(ERR, "configure queues failed");
1015 num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
1016 index += IAVF_CFG_Q_NUM_PER_BUF;
1019 if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
1020 PMD_DRV_LOG(ERR, "configure queues failed");
1024 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
1025 PMD_DRV_LOG(ERR, "configure irq failed");
1028 /* re-enable interrupts, because the efd assignment may have changed */
1029 if (dev->data->dev_conf.intr_conf.rxq != 0) {
1030 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1031 rte_intr_disable(intr_handle);
1032 rte_intr_enable(intr_handle);
1035 /* Set all MAC addresses */
1036 iavf_add_del_all_mac_addr(adapter, true);
1038 /* Set all multicast addresses */
1039 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
1042 rte_spinlock_init(&vf->phc_time_aq_lock);
1044 if (iavf_start_queues(dev) != 0) {
1045 PMD_DRV_LOG(ERR, "enable queues failed");
1052 iavf_add_del_all_mac_addr(adapter, false);
1058 iavf_dev_stop(struct rte_eth_dev *dev)
1060 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1061 struct iavf_adapter *adapter =
1062 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1063 struct rte_intr_handle *intr_handle = dev->intr_handle;
1065 PMD_INIT_FUNC_TRACE();
1067 if (adapter->closed)
1070 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
1071 dev->data->dev_conf.intr_conf.rxq != 0)
1072 rte_intr_disable(intr_handle);
1074 if (adapter->stopped == 1)
1077 iavf_stop_queues(dev);
1079 /* Disable the interrupt for Rx */
1080 rte_intr_efd_disable(intr_handle);
1081 /* Rx interrupt vector mapping free */
1082 rte_intr_vec_list_free(intr_handle);
1084 /* remove all MAC addresses */
1085 iavf_add_del_all_mac_addr(adapter, false);
1087 /* remove all multicast addresses */
1088 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
1091 adapter->stopped = 1;
1092 dev->data->dev_started = 0;
1098 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1100 struct iavf_adapter *adapter =
1101 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1102 struct iavf_info *vf = &adapter->vf;
1104 if (adapter->closed)
1107 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
1108 dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
1109 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
1110 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
1111 dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
1112 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1113 dev_info->hash_key_size = vf->vf_res->rss_key_size;
1114 dev_info->reta_size = vf->vf_res->rss_lut_size;
1115 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
1116 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
1117 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1118 dev_info->rx_offload_capa =
1119 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
1120 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
1121 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1122 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1123 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
1124 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1125 RTE_ETH_RX_OFFLOAD_SCATTER |
1126 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1127 RTE_ETH_RX_OFFLOAD_RSS_HASH;
1129 dev_info->tx_offload_capa =
1130 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1131 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
1132 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1133 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1134 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1135 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
1136 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1137 RTE_ETH_TX_OFFLOAD_TCP_TSO |
1138 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1139 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
1140 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
1141 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
1142 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
1143 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1145 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
1146 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
1148 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
1149 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
1151 if (iavf_ipsec_crypto_supported(adapter)) {
1152 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1153 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1156 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1157 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
1162 dev_info->default_txconf = (struct rte_eth_txconf) {
1163 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
1164 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
1168 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1169 .nb_max = IAVF_MAX_RING_DESC,
1170 .nb_min = IAVF_MIN_RING_DESC,
1171 .nb_align = IAVF_ALIGN_RING_DESC,
1174 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1175 .nb_max = IAVF_MAX_RING_DESC,
1176 .nb_min = IAVF_MIN_RING_DESC,
1177 .nb_align = IAVF_ALIGN_RING_DESC,
1183 static const uint32_t *
1184 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1186 static const uint32_t ptypes[] = {
1188 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1191 RTE_PTYPE_L4_NONFRAG,
1201 iavf_dev_link_update(struct rte_eth_dev *dev,
1202 __rte_unused int wait_to_complete)
1204 struct rte_eth_link new_link;
1205 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1207 memset(&new_link, 0, sizeof(new_link));
1209 /* Only read the status info stored in the VF; the info is updated
1210 * when a LINK_CHANGE event is received from the PF via virtchnl.
1212 switch (vf->link_speed) {
1214 new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1217 new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1220 new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1223 new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1226 new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1229 new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1232 new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1235 new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1238 new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1241 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1245 new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1246 new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
1248 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1249 RTE_ETH_LINK_SPEED_FIXED);
1251 return rte_eth_linkstatus_set(dev, &new_link);
1255 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1257 struct iavf_adapter *adapter =
1258 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1259 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1261 return iavf_config_promisc(adapter,
1262 true, vf->promisc_multicast_enabled);
1266 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1268 struct iavf_adapter *adapter =
1269 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1270 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1272 return iavf_config_promisc(adapter,
1273 false, vf->promisc_multicast_enabled);
1277 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1279 struct iavf_adapter *adapter =
1280 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1281 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1283 return iavf_config_promisc(adapter,
1284 vf->promisc_unicast_enabled, true);
1288 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1290 struct iavf_adapter *adapter =
1291 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1292 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1294 return iavf_config_promisc(adapter,
1295 vf->promisc_unicast_enabled, false);
1299 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
1300 __rte_unused uint32_t index,
1301 __rte_unused uint32_t pool)
1303 struct iavf_adapter *adapter =
1304 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1305 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1308 if (rte_is_zero_ether_addr(addr)) {
1309 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1313 err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
1315 PMD_DRV_LOG(ERR, "fail to add MAC address");
1325 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1327 struct iavf_adapter *adapter =
1328 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1329 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1330 struct rte_ether_addr *addr;
1333 addr = &dev->data->mac_addrs[index];
1335 err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
1337 PMD_DRV_LOG(ERR, "fail to delete MAC address");
1343 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1345 struct iavf_adapter *adapter =
1346 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1347 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1350 if (adapter->closed)
1353 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1354 err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
1360 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1363 err = iavf_add_del_vlan(adapter, vlan_id, on);
1370 iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1372 struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1373 struct iavf_adapter *adapter =
1374 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1378 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1379 if (vfc->ids[i] == 0)
1383 for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1385 iavf_add_del_vlan_v2(adapter,
1386 64 * i + j, enable);
1392 iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1394 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1395 struct iavf_adapter *adapter =
1396 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1400 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1401 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
1403 iavf_iterate_vlan_filters_v2(dev, enable);
1406 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1407 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1409 err = iavf_config_vlan_strip_v2(adapter, enable);
1410 /* If not supported, stripping is already disabled by the PF */
1411 if (err == -ENOTSUP && !enable)
1421 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1423 struct iavf_adapter *adapter =
1424 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1425 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1426 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1429 if (adapter->closed)
1432 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1433 return iavf_dev_vlan_offload_set_v2(dev, mask);
1435 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1438 /* VLAN stripping setting */
1439 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1440 /* Enable or disable VLAN stripping */
1441 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1442 err = iavf_enable_vlan_strip(adapter);
1444 err = iavf_disable_vlan_strip(adapter);
1453 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
1454 struct rte_eth_rss_reta_entry64 *reta_conf,
1457 struct iavf_adapter *adapter =
1458 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1459 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1461 uint16_t i, idx, shift;
1464 if (adapter->closed)
1467 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1470 if (reta_size != vf->vf_res->rss_lut_size) {
1471 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1472 "(%d) doesn't match the number of hardware can "
1473 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1477 lut = rte_zmalloc("rss_lut", reta_size, 0);
1479 PMD_DRV_LOG(ERR, "No memory can be allocated");
1482 /* keep a copy of the old LUT so it can be restored on failure */
1483 rte_memcpy(lut, vf->rss_lut, reta_size);
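/*
 * Indexing sketch (illustrative): with RTE_ETH_RETA_GROUP_SIZE == 64,
 * RETA entry i == 70 maps to idx = 70 / 64 = 1 and shift = 70 % 64 = 6,
 * so it is updated only when bit 6 of reta_conf[1].mask is set, taking
 * the value reta_conf[1].reta[6].
 */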
1485 for (i = 0; i < reta_size; i++) {
1486 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1487 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1488 if (reta_conf[idx].mask & (1ULL << shift))
1489 vf->rss_lut[i] = reta_conf[idx].reta[shift];
1493 /* send virtchnl ops to configure RSS */
1494 ret = iavf_configure_rss_lut(adapter);
1495 if (ret) /* restore the old LUT on failure */
1496 rte_memcpy(vf->rss_lut, lut, reta_size);
1503 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
1504 struct rte_eth_rss_reta_entry64 *reta_conf,
1507 struct iavf_adapter *adapter =
1508 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1509 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1510 uint16_t i, idx, shift;
1512 if (adapter->closed)
1515 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1518 if (reta_size != vf->vf_res->rss_lut_size) {
1519 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1520 "(%d) doesn't match the number of hardware can "
1521 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1525 for (i = 0; i < reta_size; i++) {
1526 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1527 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1528 if (reta_conf[idx].mask & (1ULL << shift))
1529 reta_conf[idx].reta[shift] = vf->rss_lut[i];
1536 iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
1538 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1540 /* HENA is enabled by default, no change; only the key is set here */
1541 if (!key || key_len == 0) {
1542 PMD_DRV_LOG(DEBUG, "No key to be configured");
1544 } else if (key_len != vf->vf_res->rss_key_size) {
1545 PMD_DRV_LOG(ERR, "The size of hash key configured "
1546 "(%d) doesn't match the size of hardware can "
1547 "support (%d)", key_len,
1548 vf->vf_res->rss_key_size);
1552 rte_memcpy(vf->rss_key, key, key_len);
1554 return iavf_configure_rss_key(adapter);
1558 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
1559 struct rte_eth_rss_conf *rss_conf)
1561 struct iavf_adapter *adapter =
1562 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1563 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1566 adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
1568 if (adapter->closed)
1571 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1575 ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
1576 rss_conf->rss_key_len);
1580 if (rss_conf->rss_hf == 0) {
1582 ret = iavf_set_hena(adapter, 0);
1584 /* This is a workaround: temporarily allow an error to be returned,
1585 * since the PF may not handle hena = 0.
1588 PMD_DRV_LOG(WARNING, "failed to clear existing RSS; PF support may be missing");
1592 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
1593 /* Clear existing RSS. */
1594 ret = iavf_set_hena(adapter, 0);
1596 /* This is a workaround: temporarily allow an error to be returned,
1597 * since the PF may not handle hena = 0.
1600 PMD_DRV_LOG(WARNING, "failed to clear existing RSS,"
1603 /* Set new RSS configuration. */
1604 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
1606 PMD_DRV_LOG(ERR, "fail to set new RSS");
1610 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
1617 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1618 struct rte_eth_rss_conf *rss_conf)
1620 struct iavf_adapter *adapter =
1621 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1622 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1624 if (adapter->closed)
1627 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1630 rss_conf->rss_hf = vf->rss_hf;
1632 if (!rss_conf->rss_key)
1635 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
1636 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
1642 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1644 /* MTU setting is forbidden if the port is started */
1645 if (dev->data->dev_started) {
1646 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
1654 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1655 struct rte_ether_addr *mac_addr)
1657 struct iavf_adapter *adapter =
1658 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1659 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1660 struct rte_ether_addr *old_addr;
1663 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1665 if (rte_is_same_ether_addr(old_addr, mac_addr))
1668 ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
1670 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1671 RTE_ETHER_ADDR_PRT_FMT,
1672 RTE_ETHER_ADDR_BYTES(old_addr));
1674 ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
1676 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1677 RTE_ETHER_ADDR_PRT_FMT,
1678 RTE_ETHER_ADDR_BYTES(mac_addr));
1683 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
1688 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1690 if (*stat >= *offset)
1691 *stat = *stat - *offset;
1693 *stat = (uint64_t)((*stat +
1694 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1696 *stat &= IAVF_48_BIT_MASK;
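/*
 * Rollover sketch for iavf_stat_update_48() (illustrative): the hardware
 * counters are 48 bits wide, so if offset = 0xFFFFFFFFFFF0 and the
 * counter has since wrapped to stat = 0x10, the computed delta is
 *
 *	(0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20
 *
 * i.e. 32 units counted since the offset was captured.
 */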
1700 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1702 if (*stat >= *offset)
1703 *stat = (uint64_t)(*stat - *offset);
1705 *stat = (uint64_t)((*stat +
1706 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
1710 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1712 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
1714 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1715 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1716 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1717 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1718 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1719 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1720 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1721 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1722 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1723 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1724 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1728 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1730 struct iavf_adapter *adapter =
1731 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1732 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1733 struct iavf_vsi *vsi = &vf->vsi;
1734 struct virtchnl_eth_stats *pstats = NULL;
1737 ret = iavf_query_stats(adapter, &pstats);
1739 uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
1740 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
1742 iavf_update_stats(vsi, pstats);
1743 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1744 pstats->rx_broadcast - pstats->rx_discards;
1745 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1747 stats->imissed = pstats->rx_discards;
1748 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1749 stats->ibytes = pstats->rx_bytes;
1750 stats->ibytes -= stats->ipackets * crc_stats_len;
1751 stats->obytes = pstats->tx_bytes;
1753 PMD_DRV_LOG(ERR, "Get statistics failed");
1759 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1762 struct iavf_adapter *adapter =
1763 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1764 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1765 struct iavf_vsi *vsi = &vf->vsi;
1766 struct virtchnl_eth_stats *pstats = NULL;
1768 /* read stat values to clear hardware registers */
1769 ret = iavf_query_stats(adapter, &pstats);
1773 /* set stats offset based on current values */
1774 vsi->eth_stats_offset.eth_stats = *pstats;
1780 iavf_dev_xstats_reset(struct rte_eth_dev *dev)
1782 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1783 iavf_dev_stats_reset(dev);
1784 memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
1785 sizeof(struct iavf_ipsec_crypto_stats));
1789 static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1790 struct rte_eth_xstat_name *xstats_names,
1791 __rte_unused unsigned int limit)
1795 if (xstats_names != NULL)
1796 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1797 snprintf(xstats_names[i].name,
1798 sizeof(xstats_names[i].name),
1799 "%s", rte_iavf_stats_strings[i].name);
1801 return IAVF_NB_XSTATS;
1805 iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
1806 struct iavf_ipsec_crypto_stats *ips)
1809 for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
1810 struct iavf_rx_queue *rxq;
1811 struct iavf_ipsec_crypto_stats *stats;
1812 rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
1813 stats = &rxq->stats.ipsec_crypto;
1814 ips->icount += stats->icount;
1815 ips->ibytes += stats->ibytes;
1816 ips->ierrors.count += stats->ierrors.count;
1817 ips->ierrors.sad_miss += stats->ierrors.sad_miss;
1818 ips->ierrors.not_processed += stats->ierrors.not_processed;
1819 ips->ierrors.icv_check += stats->ierrors.icv_check;
1820 ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
1821 ips->ierrors.misc += stats->ierrors.misc;
1825 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
1826 struct rte_eth_xstat *xstats, unsigned int n)
1830 struct iavf_adapter *adapter =
1831 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1832 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1833 struct iavf_vsi *vsi = &vf->vsi;
1834 struct virtchnl_eth_stats *pstats = NULL;
1835 struct iavf_eth_xstats iavf_xtats = {{0}};
1837 if (n < IAVF_NB_XSTATS)
1838 return IAVF_NB_XSTATS;
1840 ret = iavf_query_stats(adapter, &pstats);
1847 iavf_update_stats(vsi, pstats);
1848 iavf_xtats.eth_stats = *pstats;
1850 if (iavf_ipsec_crypto_supported(adapter))
1851 iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
1853 /* loop over xstats array and values from pstats */
1854 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1856 xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
1857 rte_iavf_stats_strings[i].offset);
1860 return IAVF_NB_XSTATS;
1865 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1867 struct iavf_adapter *adapter =
1868 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1869 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1870 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1871 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1874 if (adapter->closed)
1877 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1879 if (msix_intr == IAVF_MISC_VEC_ID) {
1880 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1881 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1882 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1883 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1884 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1887 IAVF_VFINT_DYN_CTLN1
1888 (msix_intr - IAVF_RX_VEC_START),
1889 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1890 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1891 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1894 IAVF_WRITE_FLUSH(hw);
1896 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1897 rte_intr_ack(pci_dev->intr_handle);
1903 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1905 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1906 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1909 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1911 if (msix_intr == IAVF_MISC_VEC_ID) {
1912 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1917 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1918 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
1920 IAVF_WRITE_FLUSH(hw);
1925 iavf_check_vf_reset_done(struct iavf_hw *hw)
1929 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1930 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1931 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1932 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1933 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1934 reset == VIRTCHNL_VFR_COMPLETED)
1939 if (i >= IAVF_RESET_WAIT_CNT)
1946 iavf_lookup_proto_xtr_type(const char *flex_name)
1950 enum iavf_proto_xtr_type type;
1951 } xtr_type_map[] = {
1952 { "vlan", IAVF_PROTO_XTR_VLAN },
1953 { "ipv4", IAVF_PROTO_XTR_IPV4 },
1954 { "ipv6", IAVF_PROTO_XTR_IPV6 },
1955 { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
1956 { "tcp", IAVF_PROTO_XTR_TCP },
1957 { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
1958 { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
1962 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
1963 if (strcmp(flex_name, xtr_type_map[i].name) == 0)
1964 return xtr_type_map[i].type;
1967 PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
1968 "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
1974 * Parse an elem; an elem can be a single number/range or a '(' ')' group
1975 * 1) A single number elem: just a simple digit, e.g. 9
1976 * 2) A single range elem: two digits with a '-' between, e.g. 2-6
1977 * 3) A group elem: combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
1978 * Within a group elem, '-' is the range separator;
1979 * ',' separates single numbers and ranges.
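 *
 * Illustrative examples (assumptions, not an exhaustive grammar): "9",
 * "2-6" and "(0,2-4,6)" are all valid elems; in a full devarg such as
 * proto_xtr='[(0,2-4,6):tcp,9:vlan]', this function is called once per
 * "<queue set>:<type>" pair.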
1982 iavf_parse_queue_set(const char *input, int xtr_type,
1983 struct iavf_devargs *devargs)
1985 const char *str = input;
1990 while (isblank(*str))
1993 if (!isdigit(*str) && *str != '(')
1996 /* process a single number or a single range of numbers */
1999 idx = strtoul(str, &end, 10);
2000 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
2003 while (isblank(*end))
2009 /* process single <number>-<number> */
2012 while (isblank(*end))
2018 idx = strtoul(end, &end, 10);
2019 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
2023 while (isblank(*end))
2030 for (idx = RTE_MIN(min, max);
2031 idx <= RTE_MAX(min, max); idx++)
2032 devargs->proto_xtr[idx] = xtr_type;
2037 /* process set within bracket */
2039 while (isblank(*str))
2044 min = IAVF_MAX_QUEUE_NUM;
2046 /* advance to the first digit */
2047 while (isblank(*str))
2052 /* get the digit value */
2054 idx = strtoul(str, &end, 10);
2055 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
2058 /* advance to separator '-', ',' or ')' */
2059 while (isblank(*end))
2062 if (min == IAVF_MAX_QUEUE_NUM)
2064 else /* avoid continuous '-' */
2066 } else if (*end == ',' || *end == ')') {
2068 if (min == IAVF_MAX_QUEUE_NUM)
2071 for (idx = RTE_MIN(min, max);
2072 idx <= RTE_MAX(min, max); idx++)
2073 devargs->proto_xtr[idx] = xtr_type;
2075 min = IAVF_MAX_QUEUE_NUM;
2081 } while (*end != ')' && *end != '\0');
2087 iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
2089 const char *queue_start;
2094 while (isblank(*queues))
2097 if (*queues != '[') {
2098 xtr_type = iavf_lookup_proto_xtr_type(queues);
2102 devargs->proto_xtr_dflt = xtr_type;
2109 while (isblank(*queues))
2111 if (*queues == '\0')
2114 queue_start = queues;
2116 /* go across a complete bracket */
2117 if (*queue_start == '(') {
2118 queues += strcspn(queues, ")");
2123 /* scan the separator ':' */
2124 queues += strcspn(queues, ":");
2125 if (*queues++ != ':')
2127 while (isblank(*queues))
2130 for (idx = 0; ; idx++) {
2131 if (isblank(queues[idx]) ||
2132 queues[idx] == ',' ||
2133 queues[idx] == ']' ||
2134 queues[idx] == '\0')
2137 if (idx > sizeof(flex_name) - 2)
2140 flex_name[idx] = queues[idx];
2142 flex_name[idx] = '\0';
2143 xtr_type = iavf_lookup_proto_xtr_type(flex_name);
2149 while (isblank(*queues) || *queues == ',' || *queues == ']')
2152 if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
2154 } while (*queues != '\0');
2160 iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
2163 struct iavf_devargs *devargs = extra_args;
2165 if (!value || !extra_args)
2168 if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
2169 PMD_DRV_LOG(ERR, "the proto_xtr's parameter is wrong : '%s'",
2178 parse_u16(__rte_unused const char *key, const char *value, void *args)
2180 u16 *num = (u16 *)args;
2184 tmp = strtoull(value, NULL, 10);
2185 if (errno || !tmp) {
2186 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
2196 static int iavf_parse_devargs(struct rte_eth_dev *dev)
2198 struct iavf_adapter *ad =
2199 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2200 struct rte_devargs *devargs = dev->device->devargs;
2201 struct rte_kvargs *kvlist;
2207 kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
2209 PMD_INIT_LOG(ERR, "invalid kvargs key\n");
2213 ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
2214 memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
2215 sizeof(ad->devargs.proto_xtr));
2217 ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
2218 &iavf_handle_proto_xtr_arg, &ad->devargs);
2222 ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
2223 &parse_u16, &ad->devargs.quanta_size);
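/*
 * Accepted quanta sizes, per the check below (illustrative): 0 (unset)
 * and 1024 pass; 128 fails (< 256), 5000 fails (> 4096), and 320
 * (0x140) fails because bit 0x40 is set.
 */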
2227 if (ad->devargs.quanta_size != 0 &&
2228 (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
2229 ad->devargs.quanta_size & 0x40)) {
2230 PMD_INIT_LOG(ERR, "invalid quanta size\n");
2236 rte_kvargs_free(kvlist);
2241 iavf_init_proto_xtr(struct rte_eth_dev *dev)
2243 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2244 struct iavf_adapter *ad =
2245 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2246 const struct iavf_proto_xtr_ol *xtr_ol;
2247 bool proto_xtr_enable = false;
2251 vf->proto_xtr = rte_zmalloc("vf proto xtr",
2252 vf->vsi_res->num_queue_pairs, 0);
2253 if (unlikely(!(vf->proto_xtr))) {
2254 PMD_DRV_LOG(ERR, "no memory for setting up proto_xtr's table");
2258 for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
2259 vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
2260 IAVF_PROTO_XTR_NONE ?
2261 ad->devargs.proto_xtr[i] :
2262 ad->devargs.proto_xtr_dflt;
2264 if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
2265 uint8_t type = vf->proto_xtr[i];
2267 iavf_proto_xtr_params[type].required = true;
2268 proto_xtr_enable = true;
2272 if (likely(!proto_xtr_enable))
2275 offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
2276 if (unlikely(offset == -1)) {
2278 "failed to extract protocol metadata, error %d",
2284 "proto_xtr metadata offset in mbuf is : %d",
2286 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;
2288 for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
2289 xtr_ol = &iavf_proto_xtr_params[i];
2291 uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);
2293 if (!xtr_ol->required)
2296 if (!(vf->supported_rxdid & BIT(rxdid))) {
2298 "rxdid[%u] is not supported in hardware",
2300 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2304 offset = rte_mbuf_dynflag_register(&xtr_ol->param);
2305 if (unlikely(offset == -1)) {
2307 "failed to register proto_xtr offload '%s', error %d",
2308 xtr_ol->param.name, -rte_errno);
2310 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2315 "proto_xtr offload '%s' offset in mbuf is : %d",
2316 xtr_ol->param.name, offset);
2317 *xtr_ol->ol_flag = 1ULL << offset;
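/*
 * Consumption sketch (illustrative, not part of this driver): once the
 * registrations above succeed, an application can test the per-type dynflag
 * in ol_flags and read the extracted metadata through the exported dynfield
 * offset, roughly as follows:
 *
 *   if (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_vlan_mask) {
 *       uint32_t xtr = *RTE_MBUF_DYNFIELD(mb,
 *               rte_pmd_ifd_dynfield_proto_xtr_metadata_offs, uint32_t *);
 *       ... parse xtr ...
 *   }
 */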
static int
iavf_init_vf(struct rte_eth_dev *dev)
{
	int err, bufsz;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = iavf_parse_devargs(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		goto err;
	}

	err = iavf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = iavf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	iavf_init_adminq_parameter(hw);
	err = iavf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}

	if (iavf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (iavf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "iavf_get_vf_resource failed");
		goto err_alloc;
	}

	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}

	if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT)
		vf->lv_enabled = true;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
		if (iavf_get_supported_rxdid(adapter) != 0) {
			PMD_INIT_LOG(ERR, "failed to get supported rxdid");
			goto err_rss;
		}
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
			PMD_INIT_LOG(ERR, "failed to get VLAN offload v2 capabilities");
			goto err_rss;
		}
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
		bufsz = sizeof(struct virtchnl_qos_cap_list) +
			IAVF_MAX_TRAFFIC_CLASS *
			sizeof(struct virtchnl_qos_cap_elem);
		vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
		if (!vf->qos_cap) {
			PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
			goto err_rss;
		}
		iavf_tm_conf_init(dev);
	}

	iavf_init_proto_xtr(dev);

	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->qos_cap);
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	iavf_shutdown_adminq(hw);
err:
	return -1;
}
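/*
 * Release everything iavf_init_vf() acquired. Buffers are freed in reverse
 * order of allocation and the pointers are cleared so a later close/uninit
 * pass does not double-free them.
 */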
static int
iavf_uninit_vf(struct rte_eth_dev *dev)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	iavf_shutdown_adminq(hw);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(vf->qos_cap);
	vf->qos_cap = NULL;

	rte_free(vf->rss_lut);
	vf->rss_lut = NULL;
	rte_free(vf->rss_key);
	vf->rss_key = NULL;

	return 0;
}
/* Enable default admin queue interrupt setting */
static inline void
iavf_enable_irq0(struct iavf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(hw);
}

static inline void
iavf_disable_irq0(struct iavf_hw *hw)
{
	/* Disable all interrupt types */
	IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	IAVF_WRITE_FLUSH(hw);
}
static void
iavf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	iavf_disable_irq0(hw);

	iavf_handle_virtchnl_msg(dev);

	iavf_enable_irq0(hw);
}
static void
iavf_dev_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t icr0;

	iavf_disable_irq0(hw);

	/* read out interrupt causes */
	icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);

	if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
		iavf_handle_virtchnl_msg(dev);
	}

	iavf_enable_irq0(hw);

	/* re-arm the periodic admin queue poll */
	rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
			  iavf_dev_alarm_handler, dev);
}
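/*
 * Note: iavf_dev_init() below wires the admin queue either to a real
 * interrupt (when the PF grants VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) or, failing
 * that, to this periodic alarm, which polls ICR01 and re-arms itself every
 * IAVF_ALARM_INTERVAL microseconds.
 */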
static int
iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
		      const struct rte_flow_ops **ops)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	if (adapter->closed)
		return -EIO;

	*ops = &iavf_flow_ops;
	return 0;
}
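/*
 * Usage sketch (illustrative): this callback is what hands iavf_flow_ops to
 * the generic rte_flow layer, so an application reaches it indirectly:
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 *
 * attr/pattern/actions are standard rte_flow objects; nothing here is
 * iavf-specific beyond the ops table returned above.
 */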
static void
iavf_default_rss_disable(struct iavf_adapter *adapter)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int ret = 0;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		/* Set hena = 0 to ask PF to cleanup all existing RSS. */
		ret = iavf_set_hena(adapter, 0);
		if (ret)
			/* It is a workaround, temporarily allow error to be
			 * returned due to possible lack of PF handling for
			 * hena = 0.
			 */
			PMD_INIT_LOG(WARNING,
				     "failed to disable default RSS, lack of PF support");
	}
}
static int
iavf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &iavf_eth_dev_ops;
	eth_dev->rx_queue_count = iavf_dev_rxq_count;
	eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
	eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
	eth_dev->rx_pkt_burst = &iavf_recv_pkts;
	eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &iavf_prep_pkts;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check if we need a different RX
	 * and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		iavf_set_rx_function(eth_dev);
		iavf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->dev_data = eth_dev->data;
	adapter->stopped = 1;

	if (iavf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* set default ptype table */
	iavf_set_default_ptype_table(eth_dev);

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
		"iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
		ret = -ENOMEM;
		goto init_vf_err;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!rte_is_valid_assigned_ether_addr(
			(struct rte_ether_addr *)hw->mac.addr))
		rte_eth_random_addr(hw->mac.addr);
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* register callback func to eal lib */
		rte_intr_callback_register(pci_dev->intr_handle,
					   iavf_dev_interrupt_handler,
					   (void *)eth_dev);

		/* enable uio intr after callback register */
		rte_intr_enable(pci_dev->intr_handle);
	} else {
		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
				  iavf_dev_alarm_handler, eth_dev);
	}

	/* configure and enable device interrupt */
	iavf_enable_irq0(hw);

	ret = iavf_flow_init(adapter);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize flow");
		goto flow_init_err;
	}

	/* Check if the IPsec Crypto offload is supported and create
	 * security_ctx if it is.
	 */
	if (iavf_ipsec_crypto_supported(adapter)) {
		/* Initialize security_ctx only for primary process */
		ret = iavf_security_ctx_create(adapter);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
			goto flow_init_err;
		}

		ret = iavf_security_init(adapter);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
			goto flow_init_err;
		}
	}

	iavf_default_rss_disable(adapter);

	/* Start device watchdog */
	iavf_dev_watchdog_enable(adapter);

	adapter->closed = false;

	return 0;

flow_init_err:
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

init_vf_err:
	iavf_uninit_vf(eth_dev);

	return ret;
}
static int
iavf_dev_close(struct rte_eth_dev *dev)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->closed)
		return 0;

	ret = iavf_dev_stop(dev);
	adapter->closed = true;

	/* free all resources related to the iAVF security device context */
	iavf_security_ctx_destroy(adapter);

	iavf_flow_flush(dev, NULL);
	iavf_flow_uninit(adapter);

	/*
	 * Disable promiscuous mode before resetting the VF. This is a
	 * workaround when working with the kernel driver; it is not the
	 * normal path.
	 */
	if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
		iavf_config_promisc(adapter, false, false);

	iavf_shutdown_adminq(hw);
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* disable uio intr before callback unregister */
		rte_intr_disable(intr_handle);

		/* unregister callback func from eal lib */
		rte_intr_callback_unregister(intr_handle,
					     iavf_dev_interrupt_handler, dev);
	} else {
		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
	}
	iavf_disable_irq0(hw);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
		iavf_tm_conf_uninit(dev);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (vf->rss_lut) {
			rte_free(vf->rss_lut);
			vf->rss_lut = NULL;
		}
		if (vf->rss_key) {
			rte_free(vf->rss_key);
			vf->rss_key = NULL;
		}
	}

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	/*
	 * If the VF is reset via VFLR, the device will be knocked out of bus
	 * master mode, and the driver will fail to recover from the reset. Fix
	 * this by enabling bus mastering after every reset. In a non-VFLR case,
	 * the bus master bit will not be disabled, and this call will have no
	 * effect.
	 */
	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
		vf->vf_reset = false;

	/* disable watchdog */
	iavf_dev_watchdog_disable(adapter);

	return ret;
}
static int
iavf_dev_uninit(struct rte_eth_dev *dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	iavf_dev_close(dev);

	return 0;
}
/*
 * Reset VF device only to re-initialize resources in PMD layer
 */
static int
iavf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = iavf_dev_uninit(dev);
	if (ret)
		return ret;

	return iavf_dev_init(dev);
}
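/*
 * Note: applications trigger this via rte_eth_dev_reset(port_id), typically
 * after receiving an RTE_ETH_EVENT_INTR_RESET notification; the uninit/init
 * pair above rebuilds only PMD-layer resources.
 */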
static int
iavf_dcf_cap_check_handler(__rte_unused const char *key,
			   const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "dcf"))
		return -1;

	return 0;
}
static int
iavf_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       iavf_dcf_cap_check_handler, NULL) < 0)
		goto exit;

	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
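/*
 * Illustrative usage: passing the devarg below makes the probe function
 * step aside (return 1) so the DCF driver in net/ice can claim the VF
 * instead:
 *
 *   -a 18:01.0,cap=dcf
 */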
static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			      struct rte_pci_device *pci_dev)
{
	if (iavf_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct iavf_adapter), iavf_dev_init);
}
static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
}
/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_iavf_pmd = {
	.id_table = pci_id_iavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_iavf_pci_probe,
	.remove = eth_iavf_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);
#endif