1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
19 #include <rte_alarm.h>
20 #include <rte_atomic.h>
22 #include <rte_ether.h>
23 #include <ethdev_driver.h>
24 #include <ethdev_pci.h>
25 #include <rte_malloc.h>
26 #include <rte_memzone.h>
30 #include "iavf_rxtx.h"
31 #include "iavf_generic_flow.h"
32 #include "rte_pmd_iavf.h"
35 #define IAVF_PROTO_XTR_ARG "proto_xtr"
37 static const char * const iavf_valid_args[] = {
42 static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
43 .name = "intel_pmd_dynfield_proto_xtr_metadata",
44 .size = sizeof(uint32_t),
45 .align = __alignof__(uint32_t),
49 struct iavf_proto_xtr_ol {
50 const struct rte_mbuf_dynflag param;
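51 /* Table of mbuf dynflag registration parameters, one entry per
52 * extraction type; each registered flag mask is stored through
53 * ol_flag so applications can read it via rte_pmd_iavf.h.
54 */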
55 static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
56 [IAVF_PROTO_XTR_VLAN] = {
57 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
58 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
59 [IAVF_PROTO_XTR_IPV4] = {
60 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
61 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
62 [IAVF_PROTO_XTR_IPV6] = {
63 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
64 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
65 [IAVF_PROTO_XTR_IPV6_FLOW] = {
66 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
67 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
68 [IAVF_PROTO_XTR_TCP] = {
69 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
70 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
71 [IAVF_PROTO_XTR_IP_OFFSET] = {
72 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
73 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
76 static int iavf_dev_configure(struct rte_eth_dev *dev);
77 static int iavf_dev_start(struct rte_eth_dev *dev);
78 static int iavf_dev_stop(struct rte_eth_dev *dev);
79 static int iavf_dev_close(struct rte_eth_dev *dev);
80 static int iavf_dev_reset(struct rte_eth_dev *dev);
81 static int iavf_dev_info_get(struct rte_eth_dev *dev,
82 struct rte_eth_dev_info *dev_info);
83 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
84 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
85 struct rte_eth_stats *stats);
86 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
87 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
88 struct rte_eth_xstat *xstats, unsigned int n);
89 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
90 struct rte_eth_xstat_name *xstats_names,
92 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
93 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
94 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
95 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
96 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
97 struct rte_ether_addr *addr,
100 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
101 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
102 uint16_t vlan_id, int on);
103 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
104 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
105 struct rte_eth_rss_reta_entry64 *reta_conf,
107 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
108 struct rte_eth_rss_reta_entry64 *reta_conf,
110 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
111 struct rte_eth_rss_conf *rss_conf);
112 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
113 struct rte_eth_rss_conf *rss_conf);
114 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
115 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
116 struct rte_ether_addr *mac_addr);
117 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
119 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
121 static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
122 const struct rte_flow_ops **ops);
123 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
124 struct rte_ether_addr *mc_addrs,
125 uint32_t mc_addrs_num);
126 static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
128 static const struct rte_pci_id pci_id_iavf_map[] = {
129 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
130 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
131 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
132 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
133 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
134 { .vendor_id = 0, /* sentinel */ },
137 struct rte_iavf_xstats_name_off {
138 char name[RTE_ETH_XSTATS_NAME_SIZE];
142 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
143 {"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
144 {"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
145 {"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
146 {"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
147 {"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
148 {"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
149 rx_unknown_protocol)},
150 {"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
151 {"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
152 {"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
153 {"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
154 {"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
155 {"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
158 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
159 sizeof(rte_iavf_stats_strings[0]))
161 static const struct eth_dev_ops iavf_eth_dev_ops = {
162 .dev_configure = iavf_dev_configure,
163 .dev_start = iavf_dev_start,
164 .dev_stop = iavf_dev_stop,
165 .dev_close = iavf_dev_close,
166 .dev_reset = iavf_dev_reset,
167 .dev_infos_get = iavf_dev_info_get,
168 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
169 .link_update = iavf_dev_link_update,
170 .stats_get = iavf_dev_stats_get,
171 .stats_reset = iavf_dev_stats_reset,
172 .xstats_get = iavf_dev_xstats_get,
173 .xstats_get_names = iavf_dev_xstats_get_names,
174 .xstats_reset = iavf_dev_stats_reset,
175 .promiscuous_enable = iavf_dev_promiscuous_enable,
176 .promiscuous_disable = iavf_dev_promiscuous_disable,
177 .allmulticast_enable = iavf_dev_allmulticast_enable,
178 .allmulticast_disable = iavf_dev_allmulticast_disable,
179 .mac_addr_add = iavf_dev_add_mac_addr,
180 .mac_addr_remove = iavf_dev_del_mac_addr,
181 .set_mc_addr_list = iavf_set_mc_addr_list,
182 .vlan_filter_set = iavf_dev_vlan_filter_set,
183 .vlan_offload_set = iavf_dev_vlan_offload_set,
184 .rx_queue_start = iavf_dev_rx_queue_start,
185 .rx_queue_stop = iavf_dev_rx_queue_stop,
186 .tx_queue_start = iavf_dev_tx_queue_start,
187 .tx_queue_stop = iavf_dev_tx_queue_stop,
188 .rx_queue_setup = iavf_dev_rx_queue_setup,
189 .rx_queue_release = iavf_dev_rx_queue_release,
190 .tx_queue_setup = iavf_dev_tx_queue_setup,
191 .tx_queue_release = iavf_dev_tx_queue_release,
192 .mac_addr_set = iavf_dev_set_default_mac_addr,
193 .reta_update = iavf_dev_rss_reta_update,
194 .reta_query = iavf_dev_rss_reta_query,
195 .rss_hash_update = iavf_dev_rss_hash_update,
196 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
197 .rxq_info_get = iavf_dev_rxq_info_get,
198 .txq_info_get = iavf_dev_txq_info_get,
199 .mtu_set = iavf_dev_mtu_set,
200 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
201 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
202 .flow_ops_get = iavf_dev_flow_ops_get,
203 .tx_done_cleanup = iavf_dev_tx_done_cleanup,
204 .get_monitor_addr = iavf_get_monitor_addr,
205 .tm_ops_get = iavf_tm_ops_get,
209 iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
215 *(const void **)arg = &iavf_tm_ops;
221 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
222 struct rte_ether_addr *mc_addrs,
223 uint32_t mc_addrs_num)
225 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
226 struct iavf_adapter *adapter =
227 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
230 if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
232 "cannot add more than %u multicast addresses.",
233 (uint32_t)IAVF_NUM_MACADDR_MAX);
237 /* flush previous addresses */
238 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
244 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
247 /* If adding the new MAC address list fails, restore the previous
250 ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
251 vf->mc_addrs_num, true);
255 vf->mc_addrs_num = mc_addrs_num;
257 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
264 iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
266 static const uint64_t map_hena_rss[] = {
268 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
269 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
270 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
271 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
272 [IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
273 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
274 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
275 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
276 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
277 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
278 [IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
279 RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
280 [IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
281 RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
282 [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
285 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
286 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
287 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
288 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
289 [IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
290 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
291 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
292 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
293 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
294 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
295 [IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
296 RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
297 [IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
298 RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
299 [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
302 [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
305 const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
306 RTE_ETH_RSS_NONFRAG_IPV4_TCP |
307 RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
308 RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
309 RTE_ETH_RSS_FRAG_IPV4;
311 const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
312 RTE_ETH_RSS_NONFRAG_IPV6_TCP |
313 RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
314 RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
315 RTE_ETH_RSS_FRAG_IPV6;
317 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
318 uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
322 ret = iavf_get_hena_caps(adapter, &caps);
325 * RSS offload type configuration is not a required feature
326 * for the VF, so just log a warning and return.
329 "failed to get RSS offload type caps, ret: %d", ret);
334 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered
335 * generalizations of all the other IPv4 and IPv6 RSS types.
337 if (rss_hf & RTE_ETH_RSS_IPV4)
340 if (rss_hf & RTE_ETH_RSS_IPV6)
343 RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
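344 /* For each PCTYPE capability bit advertised by the PF, accumulate the hena bits whose mapped RTE RSS type was requested. */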
345 for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
346 uint64_t bit = BIT_ULL(i);
348 if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
349 valid_rss_hf |= map_hena_rss[i];
354 ret = iavf_set_hena(adapter, hena);
357 * RSS offload type configuration is not a required feature
358 * for the VF, so just log a warning and return.
361 "failed to set RSS offload types, ret: %d", ret);
365 if (valid_rss_hf & ipv4_rss)
366 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
368 if (valid_rss_hf & ipv6_rss)
369 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
371 if (rss_hf & ~valid_rss_hf)
372 PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
373 rss_hf & ~valid_rss_hf);
375 vf->rss_hf = valid_rss_hf;
379 iavf_init_rss(struct iavf_adapter *adapter)
381 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
382 struct rte_eth_rss_conf *rss_conf;
386 rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
387 nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
388 vf->max_rss_qregion);
390 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
391 PMD_DRV_LOG(DEBUG, "RSS is not supported");
395 /* configure RSS key */
396 if (!rss_conf->rss_key) {
397 /* Generate a random default hash key */
398 for (i = 0; i < vf->vf_res->rss_key_size; i++)
399 vf->rss_key[i] = (uint8_t)rte_rand();
401 rte_memcpy(vf->rss_key, rss_conf->rss_key,
402 RTE_MIN(rss_conf->rss_key_len,
403 vf->vf_res->rss_key_size));
405 /* Initialize the RSS LUT, distributing entries across the configured Rx queues */
406 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
411 /* send virtchnl ops to configure RSS */
412 ret = iavf_configure_rss_lut(adapter);
415 ret = iavf_configure_rss_key(adapter);
419 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
420 /* Set RSS hash configuration based on rss_conf->rss_hf. */
421 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
423 PMD_DRV_LOG(ERR, "failed to set default RSS");
427 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
434 iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
436 struct iavf_adapter *ad =
437 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
438 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
441 ret = iavf_request_queues(dev, num);
443 PMD_DRV_LOG(ERR, "request queues from PF failed");
446 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
447 vf->vsi_res->num_queue_pairs, num);
449 ret = iavf_dev_reset(dev);
451 PMD_DRV_LOG(ERR, "VF reset failed");
459 iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
461 struct iavf_adapter *adapter =
462 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
463 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
466 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
469 enable = !!(dev->data->dev_conf.txmode.offloads &
470 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
471 iavf_config_vlan_insert_v2(adapter, enable);
477 iavf_dev_init_vlan(struct rte_eth_dev *dev)
481 err = iavf_dev_vlan_offload_set(dev,
482 RTE_ETH_VLAN_STRIP_MASK |
483 RTE_ETH_QINQ_STRIP_MASK |
484 RTE_ETH_VLAN_FILTER_MASK |
485 RTE_ETH_VLAN_EXTEND_MASK);
487 PMD_DRV_LOG(ERR, "Failed to update VLAN offload");
491 err = iavf_dev_vlan_insert_set(dev);
493 PMD_DRV_LOG(ERR, "Failed to update VLAN insertion");
499 iavf_dev_configure(struct rte_eth_dev *dev)
501 struct iavf_adapter *ad =
502 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
503 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
504 uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
505 dev->data->nb_tx_queues);
508 ad->rx_bulk_alloc_allowed = true;
509 /* Initialize to TRUE. If any Rx queue fails to meet the
510 * vector Rx/Tx preconditions, it will be reset to FALSE.
512 ad->rx_vec_allowed = true;
513 ad->tx_vec_allowed = true;
515 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
516 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
518 /* Large VF setting */
519 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
520 if (!(vf->vf_res->vf_cap_flags &
521 VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
522 PMD_DRV_LOG(ERR, "large VF is not supported");
526 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
527 PMD_DRV_LOG(ERR, "number of queue pairs cannot be larger than %u",
528 IAVF_MAX_NUM_QUEUES_LV);
532 ret = iavf_queues_req_reset(dev, num_queue_pairs);
536 ret = iavf_get_max_rss_queue_region(ad);
538 PMD_INIT_LOG(ERR, "get max rss queue region failed");
542 vf->lv_enabled = true;
544 /* If large VF is already enabled, disable it and release the
545 * redundant queue resources. Otherwise, check whether enough
546 * queue pairs are available and, if not, request them from the PF.
548 if (vf->lv_enabled ||
549 num_queue_pairs > vf->vsi_res->num_queue_pairs) {
550 ret = iavf_queues_req_reset(dev, num_queue_pairs);
554 vf->lv_enabled = false;
556 /* if large VF is not required, use the default RSS queue region */
557 vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
560 ret = iavf_dev_init_vlan(dev);
562 PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
564 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
565 if (iavf_init_rss(ad) != 0) {
566 PMD_DRV_LOG(ERR, "configure RSS failed");
574 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
576 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
577 struct rte_eth_dev_data *dev_data = dev->data;
578 uint16_t buf_size, max_pkt_len;
579 uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
581 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
583 /* Calculate the maximum packet length allowed */
584 max_pkt_len = RTE_MIN((uint32_t)
585 rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
588 /* Check if maximum packet length is set correctly. */
589 if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
590 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
591 PMD_DRV_LOG(ERR, "maximum packet length must be "
592 "larger than %u and no larger than %u",
593 (uint32_t)RTE_ETHER_MIN_LEN,
594 (uint32_t)IAVF_FRAME_SIZE_MAX);
598 rxq->max_pkt_len = max_pkt_len;
599 if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
600 rxq->max_pkt_len > buf_size) {
601 dev_data->scattered_rx = 1;
603 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
604 IAVF_WRITE_FLUSH(hw);
610 iavf_init_queues(struct rte_eth_dev *dev)
612 struct iavf_rx_queue **rxq =
613 (struct iavf_rx_queue **)dev->data->rx_queues;
614 int i, ret = IAVF_SUCCESS;
616 for (i = 0; i < dev->data->nb_rx_queues; i++) {
617 if (!rxq[i] || !rxq[i]->q_set)
619 ret = iavf_init_rxq(dev, rxq[i]);
620 if (ret != IAVF_SUCCESS)
623 /* Choose the Rx/Tx burst functions (vector, scattered or
624 * single-segment) according to the queue parameters.
626 iavf_set_rx_function(dev);
627 iavf_set_tx_function(dev);
632 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
633 struct rte_intr_handle *intr_handle)
635 struct iavf_adapter *adapter =
636 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
637 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
638 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
639 struct iavf_qv_map *qv_map;
640 uint16_t interval, i;
643 if (rte_intr_cap_multiple(intr_handle) &&
644 dev->data->dev_conf.intr_conf.rxq) {
645 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
649 if (rte_intr_dp_is_en(intr_handle)) {
650 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
651 dev->data->nb_rx_queues)) {
652 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
653 dev->data->nb_rx_queues);
659 qv_map = rte_zmalloc("qv_map",
660 dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
662 PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
663 dev->data->nb_rx_queues);
664 goto qv_map_alloc_err;
667 if (!dev->data->dev_conf.intr_conf.rxq ||
668 !rte_intr_dp_is_en(intr_handle)) {
669 /* Rx interrupt disabled, map an interrupt only for write-back */
671 if (vf->vf_res->vf_cap_flags &
672 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
673 /* If WB_ON_ITR is supported, enable it */
674 vf->msix_base = IAVF_RX_VEC_START;
675 /* Set the ITR for index zero to 2us, so that we leave
676 * time for aggregation to occur without increasing
677 * latency dramatically.
680 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
681 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
682 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
683 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
684 /* debug - check for success! the value read back
685 * should be 2; the register offset is 0x2800.
687 /* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
689 /* Without the WB_ON_ITR offload flag, an interrupt
690 * is needed for descriptor write-back.
692 vf->msix_base = IAVF_MISC_VEC_ID;
694 /* set ITR to default */
695 interval = iavf_calc_itr_interval(
696 IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
697 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
698 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
699 (IAVF_ITR_INDEX_DEFAULT <<
700 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
702 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
704 IAVF_WRITE_FLUSH(hw);
705 /* map all queues to the same interrupt */
706 for (i = 0; i < dev->data->nb_rx_queues; i++) {
707 qv_map[i].queue_id = i;
708 qv_map[i].vector_id = vf->msix_base;
712 if (!rte_intr_allow_others(intr_handle)) {
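713 /* Only one vector is available, so all Rx queues must share the MISC vector */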
714 vf->msix_base = IAVF_MISC_VEC_ID;
715 for (i = 0; i < dev->data->nb_rx_queues; i++) {
716 qv_map[i].queue_id = i;
717 qv_map[i].vector_id = vf->msix_base;
718 rte_intr_vec_list_index_set(intr_handle,
719 i, IAVF_MISC_VEC_ID);
723 "vector %u is mapped to all Rx queues",
726 /* If Rx interrupts are required and multiple
727 * interrupts can be used, the vectors start from 1.
730 RTE_MIN(rte_intr_nb_efd_get(intr_handle),
731 (uint16_t)(vf->vf_res->max_vectors - 1));
732 vf->msix_base = IAVF_RX_VEC_START;
733 vec = IAVF_RX_VEC_START;
734 for (i = 0; i < dev->data->nb_rx_queues; i++) {
735 qv_map[i].queue_id = i;
736 qv_map[i].vector_id = vec;
737 rte_intr_vec_list_index_set(intr_handle,
739 if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
740 vec = IAVF_RX_VEC_START;
744 "%u vectors are mapped to %u Rx queues",
745 vf->nb_msix, dev->data->nb_rx_queues);
749 if (!vf->lv_enabled) {
750 if (iavf_config_irq_map(adapter)) {
751 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
752 goto config_irq_map_err;
755 uint16_t num_qv_maps = dev->data->nb_rx_queues;
758 while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
759 if (iavf_config_irq_map_lv(adapter,
760 IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
761 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
762 goto config_irq_map_err;
764 num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
765 index += IAVF_IRQ_MAP_NUM_PER_BUF;
768 if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
769 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
770 goto config_irq_map_err;
776 rte_free(vf->qv_map);
780 rte_intr_vec_list_free(intr_handle);
786 iavf_start_queues(struct rte_eth_dev *dev)
788 struct iavf_rx_queue *rxq;
789 struct iavf_tx_queue *txq;
792 for (i = 0; i < dev->data->nb_tx_queues; i++) {
793 txq = dev->data->tx_queues[i];
794 if (txq->tx_deferred_start)
796 if (iavf_dev_tx_queue_start(dev, i) != 0) {
797 PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
802 for (i = 0; i < dev->data->nb_rx_queues; i++) {
803 rxq = dev->data->rx_queues[i];
804 if (rxq->rx_deferred_start)
806 if (iavf_dev_rx_queue_start(dev, i) != 0) {
807 PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
816 iavf_dev_start(struct rte_eth_dev *dev)
818 struct iavf_adapter *adapter =
819 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
820 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
821 struct rte_intr_handle *intr_handle = dev->intr_handle;
822 uint16_t num_queue_pairs;
825 PMD_INIT_FUNC_TRACE();
827 adapter->stopped = 0;
829 vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
830 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
831 dev->data->nb_tx_queues);
832 num_queue_pairs = vf->num_queue_pairs;
834 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
835 if (iavf_get_qos_cap(adapter)) {
836 PMD_INIT_LOG(ERR, "Failed to get qos capability");
840 if (iavf_init_queues(dev) != 0) {
841 PMD_DRV_LOG(ERR, "failed to initialize queues");
845 /* If needed, send the configure-queues message multiple times to keep
846 * the adminq buffer length under the 4K limit.
848 while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
849 if (iavf_configure_queues(adapter,
850 IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
851 PMD_DRV_LOG(ERR, "configure queues failed");
854 num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
855 index += IAVF_CFG_Q_NUM_PER_BUF;
858 if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
859 PMD_DRV_LOG(ERR, "configure queues failed");
863 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
864 PMD_DRV_LOG(ERR, "configure irq failed");
867 /* Re-enable interrupts, because the efd assignment may have changed */
868 if (dev->data->dev_conf.intr_conf.rxq != 0) {
869 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
870 rte_intr_disable(intr_handle);
871 rte_intr_enable(intr_handle);
874 /* Set all MAC addresses */
875 iavf_add_del_all_mac_addr(adapter, true);
877 /* Set all multicast addresses */
878 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
881 if (iavf_start_queues(dev) != 0) {
882 PMD_DRV_LOG(ERR, "enable queues failed");
889 iavf_add_del_all_mac_addr(adapter, false);
895 iavf_dev_stop(struct rte_eth_dev *dev)
897 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
898 struct iavf_adapter *adapter =
899 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
900 struct rte_intr_handle *intr_handle = dev->intr_handle;
902 PMD_INIT_FUNC_TRACE();
904 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
905 dev->data->dev_conf.intr_conf.rxq != 0)
906 rte_intr_disable(intr_handle);
908 if (adapter->stopped == 1)
911 iavf_stop_queues(dev);
913 /* Disable the interrupt for Rx */
914 rte_intr_efd_disable(intr_handle);
915 /* Free the Rx interrupt vector mapping */
916 rte_intr_vec_list_free(intr_handle);
918 /* Remove all MAC addresses */
919 iavf_add_del_all_mac_addr(adapter, false);
921 /* remove all multicast addresses */
922 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
925 adapter->stopped = 1;
926 dev->data->dev_started = 0;
932 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
934 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
936 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
937 dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
938 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
939 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
940 dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
941 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
942 dev_info->hash_key_size = vf->vf_res->rss_key_size;
943 dev_info->reta_size = vf->vf_res->rss_lut_size;
944 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
945 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
946 dev_info->rx_offload_capa =
947 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
948 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
949 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
950 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
951 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
952 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
953 RTE_ETH_RX_OFFLOAD_SCATTER |
954 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
955 RTE_ETH_RX_OFFLOAD_RSS_HASH;
957 dev_info->tx_offload_capa =
958 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
959 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
960 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
961 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
962 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
963 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
964 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
965 RTE_ETH_TX_OFFLOAD_TCP_TSO |
966 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
967 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
968 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
969 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
970 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
971 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
973 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
974 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
976 dev_info->default_rxconf = (struct rte_eth_rxconf) {
977 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
982 dev_info->default_txconf = (struct rte_eth_txconf) {
983 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
984 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
988 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
989 .nb_max = IAVF_MAX_RING_DESC,
990 .nb_min = IAVF_MIN_RING_DESC,
991 .nb_align = IAVF_ALIGN_RING_DESC,
994 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
995 .nb_max = IAVF_MAX_RING_DESC,
996 .nb_min = IAVF_MIN_RING_DESC,
997 .nb_align = IAVF_ALIGN_RING_DESC,
1003 static const uint32_t *
1004 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1006 static const uint32_t ptypes[] = {
1008 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1011 RTE_PTYPE_L4_NONFRAG,
1021 iavf_dev_link_update(struct rte_eth_dev *dev,
1022 __rte_unused int wait_to_complete)
1024 struct rte_eth_link new_link;
1025 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1027 memset(&new_link, 0, sizeof(new_link));
1029 /* Only read the status info stored in the VF; the info is updated
1030 * when a LINK_CHANGE event is received from the PF over virtchnl.
1032 switch (vf->link_speed) {
1034 new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1037 new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1040 new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1043 new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1046 new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1049 new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1052 new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1055 new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1058 new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1061 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1065 new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1066 new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
1068 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1069 RTE_ETH_LINK_SPEED_FIXED);
1071 return rte_eth_linkstatus_set(dev, &new_link);
1075 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1077 struct iavf_adapter *adapter =
1078 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1079 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1081 return iavf_config_promisc(adapter,
1082 true, vf->promisc_multicast_enabled);
1086 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1088 struct iavf_adapter *adapter =
1089 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1090 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1092 return iavf_config_promisc(adapter,
1093 false, vf->promisc_multicast_enabled);
1097 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1099 struct iavf_adapter *adapter =
1100 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1101 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1103 return iavf_config_promisc(adapter,
1104 vf->promisc_unicast_enabled, true);
1108 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1110 struct iavf_adapter *adapter =
1111 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1112 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1114 return iavf_config_promisc(adapter,
1115 vf->promisc_unicast_enabled, false);
1119 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
1120 __rte_unused uint32_t index,
1121 __rte_unused uint32_t pool)
1123 struct iavf_adapter *adapter =
1124 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1125 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1128 if (rte_is_zero_ether_addr(addr)) {
1129 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1133 err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
1135 PMD_DRV_LOG(ERR, "failed to add MAC address");
1145 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1147 struct iavf_adapter *adapter =
1148 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1149 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1150 struct rte_ether_addr *addr;
1153 addr = &dev->data->mac_addrs[index];
1155 err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
1157 PMD_DRV_LOG(ERR, "failed to delete MAC address");
1163 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1165 struct iavf_adapter *adapter =
1166 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1167 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1170 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1171 err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
1177 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1180 err = iavf_add_del_vlan(adapter, vlan_id, on);
1187 iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1189 struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1190 struct iavf_adapter *adapter =
1191 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
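1192 /* dev->data->vlan_filter_conf stores one bit per VLAN ID, packed
1193 * into an array of 64-bit words; replay every bit that is set.
1194 */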
1195 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1196 if (vfc->ids[i] == 0)
1200 for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1202 iavf_add_del_vlan_v2(adapter,
1203 64 * i + j, enable);
1209 iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1211 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1212 struct iavf_adapter *adapter =
1213 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1217 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1218 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
1220 iavf_iterate_vlan_filters_v2(dev, enable);
1223 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1224 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1226 err = iavf_config_vlan_strip_v2(adapter, enable);
1227 /* If not supported, stripping is already disabled by the PF */
1228 if (err == -ENOTSUP && !enable)
1238 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1240 struct iavf_adapter *adapter =
1241 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1242 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1243 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1246 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1247 return iavf_dev_vlan_offload_set_v2(dev, mask);
1249 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1252 /* VLAN stripping setting */
1253 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1254 /* Enable or disable VLAN stripping */
1255 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1256 err = iavf_enable_vlan_strip(adapter);
1258 err = iavf_disable_vlan_strip(adapter);
1267 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
1268 struct rte_eth_rss_reta_entry64 *reta_conf,
1271 struct iavf_adapter *adapter =
1272 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1273 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1275 uint16_t i, idx, shift;
1278 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1281 if (reta_size != vf->vf_res->rss_lut_size) {
1282 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
1283 "(%d) doesn't match what the hardware can "
1284 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1288 lut = rte_zmalloc("rss_lut", reta_size, 0);
1290 PMD_DRV_LOG(ERR, "failed to allocate memory for the RSS LUT");
1293 /* store the old LUT temporarily so it can be restored on failure */
1294 rte_memcpy(lut, vf->rss_lut, reta_size);
1296 for (i = 0; i < reta_size; i++) {
1297 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1298 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1299 if (reta_conf[idx].mask & (1ULL << shift))
1300 vf->rss_lut[i] = reta_conf[idx].reta[shift];
1304 /* send virtchnl ops to configure RSS */
1305 ret = iavf_configure_rss_lut(adapter);
1306 if (ret) /* revert to the old LUT on failure */
1307 rte_memcpy(vf->rss_lut, lut, reta_size);
1314 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
1315 struct rte_eth_rss_reta_entry64 *reta_conf,
1318 struct iavf_adapter *adapter =
1319 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1320 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1321 uint16_t i, idx, shift;
1323 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1326 if (reta_size != vf->vf_res->rss_lut_size) {
1327 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
1328 "(%d) doesn't match what the hardware can "
1329 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1333 for (i = 0; i < reta_size; i++) {
1334 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1335 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1336 if (reta_conf[idx].mask & (1ULL << shift))
1337 reta_conf[idx].reta[shift] = vf->rss_lut[i];
1344 iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
1346 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1348 /* HENA is enabled by default; if no key is given there is nothing to change */
1349 if (!key || key_len == 0) {
1350 PMD_DRV_LOG(DEBUG, "No key to be configured");
1352 } else if (key_len != vf->vf_res->rss_key_size) {
1353 PMD_DRV_LOG(ERR, "The size of the configured hash key "
1354 "(%d) doesn't match what the hardware can "
1355 "support (%d)", key_len,
1356 vf->vf_res->rss_key_size);
1360 rte_memcpy(vf->rss_key, key, key_len);
1362 return iavf_configure_rss_key(adapter);
1366 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
1367 struct rte_eth_rss_conf *rss_conf)
1369 struct iavf_adapter *adapter =
1370 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1371 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1374 adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
1376 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1380 ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
1381 rss_conf->rss_key_len);
1385 if (rss_conf->rss_hf == 0) {
1387 ret = iavf_set_hena(adapter, 0);
1389 /* It is a workaround: temporarily allow an error to be returned,
1390 * due to possible lack of PF handling for hena = 0.
1393 PMD_DRV_LOG(WARNING, "failed to clear existing RSS, PF support may be lacking");
1397 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
1398 /* Clear existing RSS. */
1399 ret = iavf_set_hena(adapter, 0);
1401 /* It is a workaround: temporarily allow an error to be returned,
1402 * due to possible lack of PF handling for hena = 0.
1405 PMD_DRV_LOG(WARNING, "failed to clear existing RSS,"
1408 /* Set new RSS configuration. */
1409 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
1411 PMD_DRV_LOG(ERR, "failed to set new RSS");
1415 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
1422 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1423 struct rte_eth_rss_conf *rss_conf)
1425 struct iavf_adapter *adapter =
1426 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1427 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1429 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1432 rss_conf->rss_hf = vf->rss_hf;
1434 if (!rss_conf->rss_key)
1437 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
1438 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
1444 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1446 /* MTU setting is forbidden while the port is started */
1447 if (dev->data->dev_started) {
1448 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
1456 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1457 struct rte_ether_addr *mac_addr)
1459 struct iavf_adapter *adapter =
1460 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1461 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1462 struct rte_ether_addr *old_addr;
1465 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1467 if (rte_is_same_ether_addr(old_addr, mac_addr))
1470 ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
1472 PMD_DRV_LOG(ERR, "Failed to delete old MAC:"
1473 RTE_ETHER_ADDR_PRT_FMT,
1474 RTE_ETHER_ADDR_BYTES(old_addr));
1476 ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
1478 PMD_DRV_LOG(ERR, "Failed to add new MAC:"
1479 RTE_ETHER_ADDR_PRT_FMT,
1480 RTE_ETHER_ADDR_BYTES(mac_addr));
1485 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
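1486 /* The VF statistics counters are 48 or 32 bits wide and wrap around:
1487 * when the current reading is below the recorded offset, the counter
1488 * has wrapped, so add 2^48 (or 2^32) before subtracting the offset.
1489 */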
1490 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1492 if (*stat >= *offset)
1493 *stat = *stat - *offset;
1495 *stat = (uint64_t)((*stat +
1496 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1498 *stat &= IAVF_48_BIT_MASK;
1502 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1504 if (*stat >= *offset)
1505 *stat = (uint64_t)(*stat - *offset);
1507 *stat = (uint64_t)((*stat +
1508 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
1512 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1514 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
1516 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1517 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1518 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1519 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1520 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1521 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1522 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1523 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1524 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1525 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1526 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1530 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1532 struct iavf_adapter *adapter =
1533 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1534 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1535 struct iavf_vsi *vsi = &vf->vsi;
1536 struct virtchnl_eth_stats *pstats = NULL;
1539 ret = iavf_query_stats(adapter, &pstats);
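1540 /* the HW byte counters include the Ethernet CRC, so subtract it from ibytes unless RTE_ETH_RX_OFFLOAD_KEEP_CRC is enabled */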
1541 uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
1542 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
1544 iavf_update_stats(vsi, pstats);
1545 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1546 pstats->rx_broadcast - pstats->rx_discards;
1547 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1549 stats->imissed = pstats->rx_discards;
1550 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1551 stats->ibytes = pstats->rx_bytes;
1552 stats->ibytes -= stats->ipackets * crc_stats_len;
1553 stats->obytes = pstats->tx_bytes;
1555 PMD_DRV_LOG(ERR, "Get statistics failed");
1561 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1564 struct iavf_adapter *adapter =
1565 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1566 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1567 struct iavf_vsi *vsi = &vf->vsi;
1568 struct virtchnl_eth_stats *pstats = NULL;
1570 /* read the current stat values; they become the new offset baseline */
1571 ret = iavf_query_stats(adapter, &pstats);
1575 /* set the stats offset based on the current values */
1576 vsi->eth_stats_offset = *pstats;
1581 static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1582 struct rte_eth_xstat_name *xstats_names,
1583 __rte_unused unsigned int limit)
1587 if (xstats_names != NULL)
1588 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1589 snprintf(xstats_names[i].name,
1590 sizeof(xstats_names[i].name),
1591 "%s", rte_iavf_stats_strings[i].name);
1593 return IAVF_NB_XSTATS;
1596 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
1597 struct rte_eth_xstat *xstats, unsigned int n)
1601 struct iavf_adapter *adapter =
1602 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1603 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1604 struct iavf_vsi *vsi = &vf->vsi;
1605 struct virtchnl_eth_stats *pstats = NULL;
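1606 /* per the ethdev xstats convention, return the required array size when the caller's buffer is too small */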
1607 if (n < IAVF_NB_XSTATS)
1608 return IAVF_NB_XSTATS;
1610 ret = iavf_query_stats(adapter, &pstats);
1617 iavf_update_stats(vsi, pstats);
1619 /* loop over xstats array and values from pstats */
1620 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1622 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1623 rte_iavf_stats_strings[i].offset);
1626 return IAVF_NB_XSTATS;
1631 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1633 struct iavf_adapter *adapter =
1634 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1635 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1636 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1637 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1640 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1642 if (msix_intr == IAVF_MISC_VEC_ID) {
1643 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1644 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1645 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1646 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1647 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1650 IAVF_VFINT_DYN_CTLN1
1651 (msix_intr - IAVF_RX_VEC_START),
1652 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1653 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1654 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1657 IAVF_WRITE_FLUSH(hw);
1659 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1660 rte_intr_ack(pci_dev->intr_handle);
1666 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1668 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1669 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1672 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1674 if (msix_intr == IAVF_MISC_VEC_ID) {
1675 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1680 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1683 IAVF_WRITE_FLUSH(hw);
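1685 /* Poll VFGEN_RSTAT until the PF reports the VF reset as completed
1686 * (VFACTIVE or COMPLETED), giving up after IAVF_RESET_WAIT_CNT tries.
1687 */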
1688 iavf_check_vf_reset_done(struct iavf_hw *hw)
1692 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1693 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1694 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1695 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1696 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1697 reset == VIRTCHNL_VFR_COMPLETED)
1702 if (i >= IAVF_RESET_WAIT_CNT)
1709 iavf_lookup_proto_xtr_type(const char *flex_name)
1713 enum iavf_proto_xtr_type type;
1714 } xtr_type_map[] = {
1715 { "vlan", IAVF_PROTO_XTR_VLAN },
1716 { "ipv4", IAVF_PROTO_XTR_IPV4 },
1717 { "ipv6", IAVF_PROTO_XTR_IPV6 },
1718 { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
1719 { "tcp", IAVF_PROTO_XTR_TCP },
1720 { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
1724 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
1725 if (strcmp(flex_name, xtr_type_map[i].name) == 0)
1726 return xtr_type_map[i].type;
1729 PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
1730 "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
1736 * Parse an element; an element is a single number/range or a '(' ')' group:
1737 * 1) A single number element: a simple digit, e.g. 9
1738 * 2) A single range element: two digits with a '-' between, e.g. 2-6
1739 * 3) A group element: multiple 1) or 2) combined within '( )', e.g. (0,2-4,6)
1740 * Within a group element, '-' is used as a range separator and
1741 * ',' separates single numbers.
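1742 * Example devargs (illustrative): proto_xtr='[(1,2-3):tcp,4:vlan]'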
1744 iavf_parse_queue_set(const char *input, int xtr_type,
1745 struct iavf_devargs *devargs)
1747 const char *str = input;
1752 while (isblank(*str))
1755 if (!isdigit(*str) && *str != '(')
1758 /* process a single number or a single range of numbers */
1761 idx = strtoul(str, &end, 10);
1762 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1765 while (isblank(*end))
1771 /* process single <number>-<number> */
1774 while (isblank(*end))
1780 idx = strtoul(end, &end, 10);
1781 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1785 while (isblank(*end))
1792 for (idx = RTE_MIN(min, max);
1793 idx <= RTE_MAX(min, max); idx++)
1794 devargs->proto_xtr[idx] = xtr_type;
1799 /* process a set within brackets */
1801 while (isblank(*str))
1806 min = IAVF_MAX_QUEUE_NUM;
1808 /* advance to the first digit */
1809 while (isblank(*str))
1814 /* get the digit value */
1816 idx = strtoul(str, &end, 10);
1817 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1820 /* advance to a separator: '-', ',' or ')' */
1821 while (isblank(*end))
1824 if (min == IAVF_MAX_QUEUE_NUM)
1826 else /* avoid continuous '-' */
1828 } else if (*end == ',' || *end == ')') {
1830 if (min == IAVF_MAX_QUEUE_NUM)
1833 for (idx = RTE_MIN(min, max);
1834 idx <= RTE_MAX(min, max); idx++)
1835 devargs->proto_xtr[idx] = xtr_type;
1837 min = IAVF_MAX_QUEUE_NUM;
1843 } while (*end != ')' && *end != '\0');
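1845 /* Parse the proto_xtr devargs value: either a bare type name that
1846 * becomes the device-wide default, or a '[queues:type,...]' list of
1847 * per-queue assignments handled by iavf_parse_queue_set().
1848 */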
1849 iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
1851 const char *queue_start;
1856 while (isblank(*queues))
1859 if (*queues != '[') {
1860 xtr_type = iavf_lookup_proto_xtr_type(queues);
1864 devargs->proto_xtr_dflt = xtr_type;
1871 while (isblank(*queues))
1873 if (*queues == '\0')
1876 queue_start = queues;
1878 /* skip over a complete bracketed group */
1879 if (*queue_start == '(') {
1880 queues += strcspn(queues, ")");
1885 /* scan the separator ':' */
1886 queues += strcspn(queues, ":");
1887 if (*queues++ != ':')
1889 while (isblank(*queues))
1892 for (idx = 0; ; idx++) {
1893 if (isblank(queues[idx]) ||
1894 queues[idx] == ',' ||
1895 queues[idx] == ']' ||
1896 queues[idx] == '\0')
1899 if (idx > sizeof(flex_name) - 2)
1902 flex_name[idx] = queues[idx];
1904 flex_name[idx] = '\0';
1905 xtr_type = iavf_lookup_proto_xtr_type(flex_name);
1911 while (isblank(*queues) || *queues == ',' || *queues == ']')
1914 if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
1916 } while (*queues != '\0');
1922 iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
1925 struct iavf_devargs *devargs = extra_args;
1927 if (!value || !extra_args)
1930 if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
1931 PMD_DRV_LOG(ERR, "invalid proto_xtr parameter: '%s'",
1939 static int iavf_parse_devargs(struct rte_eth_dev *dev)
1941 struct iavf_adapter *ad =
1942 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1943 struct rte_devargs *devargs = dev->device->devargs;
1944 struct rte_kvargs *kvlist;
1950 kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
1952 PMD_INIT_LOG(ERR, "invalid kvargs key");
1956 ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
1957 memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
1958 sizeof(ad->devargs.proto_xtr));
1960 ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
1961 &iavf_handle_proto_xtr_arg, &ad->devargs);
1966 rte_kvargs_free(kvlist);
1971 iavf_init_proto_xtr(struct rte_eth_dev *dev)
1973 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1974 struct iavf_adapter *ad =
1975 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1976 const struct iavf_proto_xtr_ol *xtr_ol;
1977 bool proto_xtr_enable = false;
1981 vf->proto_xtr = rte_zmalloc("vf proto xtr",
1982 vf->vsi_res->num_queue_pairs, 0);
1983 if (unlikely(!(vf->proto_xtr))) {
1984 PMD_DRV_LOG(ERR, "no memory for setting up proto_xtr's table");
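1985 /* A per-queue devargs setting takes precedence over the
1986 * device-wide default extraction type.
1987 */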
1988 for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
1989 vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
1990 IAVF_PROTO_XTR_NONE ?
1991 ad->devargs.proto_xtr[i] :
1992 ad->devargs.proto_xtr_dflt;
1994 if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
1995 uint8_t type = vf->proto_xtr[i];
1997 iavf_proto_xtr_params[type].required = true;
1998 proto_xtr_enable = true;
2002 if (likely(!proto_xtr_enable))
2005 offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
2006 if (unlikely(offset == -1)) {
2008 "failed to register the proto_xtr metadata dynfield, error %d",
2014 "proto_xtr metadata offset in mbuf is: %d",
2016 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;
2018 for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
2019 xtr_ol = &iavf_proto_xtr_params[i];
2021 uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);
2023 if (!xtr_ol->required)
2026 if (!(vf->supported_rxdid & BIT(rxdid))) {
2028 "rxdid[%u] is not supported in hardware",
2030 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2034 offset = rte_mbuf_dynflag_register(&xtr_ol->param);
2035 if (unlikely(offset == -1)) {
2037 "failed to register proto_xtr offload '%s', error %d",
2038 xtr_ol->param.name, -rte_errno);
2040 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2045 "proto_xtr offload '%s' offset in mbuf is: %d",
2046 xtr_ol->param.name, offset);
2047 *xtr_ol->ol_flag = 1ULL << offset;
2052 iavf_init_vf(struct rte_eth_dev *dev)
2055 struct iavf_adapter *adapter =
2056 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2057 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2058 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2062 err = iavf_parse_devargs(dev);
2064 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2068 err = iavf_set_mac_type(hw);
2070 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
2074 err = iavf_check_vf_reset_done(hw);
2076 PMD_INIT_LOG(ERR, "VF is still resetting");
2080 iavf_init_adminq_parameter(hw);
2081 err = iavf_init_adminq(hw);
2083 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
2087 vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
2089 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
2092 if (iavf_check_api_version(adapter) != 0) {
2093 PMD_INIT_LOG(ERR, "check_api version failed");
2097 bufsz = sizeof(struct virtchnl_vf_resource) +
2098 (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
2099 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
2101 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
2105 if (iavf_get_vf_resource(adapter) != 0) {
2106 PMD_INIT_LOG(ERR, "iavf_get_vf_resource failed");
2109 /* Allocate memory for RSS info */
2110 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2111 vf->rss_key = rte_zmalloc("rss_key",
2112 vf->vf_res->rss_key_size, 0);
2114 PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
2117 vf->rss_lut = rte_zmalloc("rss_lut",
2118 vf->vf_res->rss_lut_size, 0);
2120 PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
2125 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2126 if (iavf_get_supported_rxdid(adapter) != 0) {
2127 PMD_INIT_LOG(ERR, "failed to get supported rxdid");
2132 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
2133 if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
2134 PMD_INIT_LOG(ERR, "failed to get VLAN offload v2 capabilities");
2139 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
2140 bufsz = sizeof(struct virtchnl_qos_cap_list) +
2141 IAVF_MAX_TRAFFIC_CLASS *
2142 sizeof(struct virtchnl_qos_cap_elem);
2143 vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
2145 PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
2148 iavf_tm_conf_init(dev);
2151 iavf_init_proto_xtr(dev);
2155 rte_free(vf->rss_key);
2156 rte_free(vf->rss_lut);
2158 rte_free(vf->qos_cap);
2159 rte_free(vf->vf_res);
2162 rte_free(vf->aq_resp);
2164 iavf_shutdown_adminq(hw);
2170 iavf_uninit_vf(struct rte_eth_dev *dev)
2172 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2173 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2175 iavf_shutdown_adminq(hw);
2177 rte_free(vf->vf_res);
2181 rte_free(vf->aq_resp);
2184 rte_free(vf->qos_cap);
2187 rte_free(vf->rss_lut);
2189 rte_free(vf->rss_key);
2193 /* Enable default admin queue interrupt setting */
2195 iavf_enable_irq0(struct iavf_hw *hw)
2197 /* Enable admin queue interrupt trigger */
2198 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
2199 IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
2201 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2202 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
2203 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
2204 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2206 IAVF_WRITE_FLUSH(hw);
2210 iavf_disable_irq0(struct iavf_hw *hw)
2212 /* Disable all interrupt types */
2213 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
2214 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2215 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2216 IAVF_WRITE_FLUSH(hw);
2220 iavf_dev_interrupt_handler(void *param)
2222 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2223 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2225 iavf_disable_irq0(hw);
2227 iavf_handle_virtchnl_msg(dev);
2229 iavf_enable_irq0(hw);
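2230 /* When WB_ON_ITR is not supported, the MISC vector is used for Rx
2231 * write-back, so admin queue events are polled from this periodic
2232 * alarm instead of the interrupt handler above. */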
2233 iavf_dev_alarm_handler(void *param)
2235 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2236 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2239 iavf_disable_irq0(hw);
2241 /* read out interrupt causes */
2242 icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
2244 if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
2245 PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
2246 iavf_handle_virtchnl_msg(dev);
2249 iavf_enable_irq0(hw);
2251 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2252 iavf_dev_alarm_handler, dev);
2256 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
2257 const struct rte_flow_ops **ops)
2262 *ops = &iavf_flow_ops;
2267 iavf_default_rss_disable(struct iavf_adapter *adapter)
2269 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2272 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2273 /* Set hena = 0 to ask PF to cleanup all existing RSS. */
2274 ret = iavf_set_hena(adapter, 0);
2276 /* It is a workaround: temporarily allow an error to be
2277 * returned, due to possible lack of PF handling for
2280 PMD_INIT_LOG(WARNING, "failed to disable default RSS,"
2286 iavf_dev_init(struct rte_eth_dev *eth_dev)
2288 struct iavf_adapter *adapter =
2289 IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2290 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
2291 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2292 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2295 PMD_INIT_FUNC_TRACE();
2297 /* assign ops func pointer */
2298 eth_dev->dev_ops = &iavf_eth_dev_ops;
2299 eth_dev->rx_queue_count = iavf_dev_rxq_count;
2300 eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
2301 eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
2302 eth_dev->rx_pkt_burst = &iavf_recv_pkts;
2303 eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
2304 eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
2306 /* For secondary processes, we don't initialise any further as primary
2307 * has already done this work. Only check if we need a different RX
2310 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2311 iavf_set_rx_function(eth_dev);
2312 iavf_set_tx_function(eth_dev);
2315 rte_eth_copy_pci_info(eth_dev, pci_dev);
2317 hw->vendor_id = pci_dev->id.vendor_id;
2318 hw->device_id = pci_dev->id.device_id;
2319 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2320 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2321 hw->bus.bus_id = pci_dev->addr.bus;
2322 hw->bus.device = pci_dev->addr.devid;
2323 hw->bus.func = pci_dev->addr.function;
2324 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
2325 hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2326 adapter->dev_data = eth_dev->data;
2327 adapter->stopped = 1;
2329 if (iavf_init_vf(eth_dev) != 0) {
2330 PMD_INIT_LOG(ERR, "Init VF failed");
2334 /* set default ptype table */
2335 iavf_set_default_ptype_table(eth_dev);
2338 eth_dev->data->mac_addrs = rte_zmalloc(
2339 "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
2340 if (!eth_dev->data->mac_addrs) {
2341 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
2342 " store MAC addresses",
2343 RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
2347 /* If the MAC address is not configured by host,
2348 * generate a random one.
2350 if (!rte_is_valid_assigned_ether_addr(
2351 (struct rte_ether_addr *)hw->mac.addr))
2352 rte_eth_random_addr(hw->mac.addr);
2353 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
2354 ð_dev->data->mac_addrs[0]);
2356 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2357 /* register callback func to eal lib */
2358 rte_intr_callback_register(pci_dev->intr_handle,
2359 iavf_dev_interrupt_handler,
2362 /* enable uio intr after callback register */
2363 rte_intr_enable(pci_dev->intr_handle);
2365 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2366 iavf_dev_alarm_handler, eth_dev);
2369 /* configure and enable device interrupt */
2370 iavf_enable_irq0(hw);
2372 ret = iavf_flow_init(adapter);
2374 PMD_INIT_LOG(ERR, "Failed to initialize flow");
2378 iavf_default_rss_disable(adapter);
2383 rte_free(eth_dev->data->mac_addrs);
2384 eth_dev->data->mac_addrs = NULL;
2387 iavf_uninit_vf(eth_dev);
2393 iavf_dev_close(struct rte_eth_dev *dev)
2395 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2396 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2397 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2398 struct iavf_adapter *adapter =
2399 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2400 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2403 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2406 ret = iavf_dev_stop(dev);
2408 iavf_flow_flush(dev, NULL);
2409 iavf_flow_uninit(adapter);
2412 * Disable promiscuous mode before resetting the VF. This is a
2413 * workaround when working with the kernel PF driver and is not
2414 * the normal flow.
2416 if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
2417 iavf_config_promisc(adapter, false, false);
2419 iavf_shutdown_adminq(hw);
2420 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2421 /* disable uio intr before callback unregister */
2422 rte_intr_disable(intr_handle);
2424 /* unregister callback func from eal lib */
2425 rte_intr_callback_unregister(intr_handle,
2426 iavf_dev_interrupt_handler, dev);
2428 rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
2430 iavf_disable_irq0(hw);
2432 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
2433 iavf_tm_conf_uninit(dev);
2435 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2437 rte_free(vf->rss_lut);
2441 rte_free(vf->rss_key);
2446 rte_free(vf->vf_res);
2450 rte_free(vf->aq_resp);
2454 * If the VF is reset via VFLR, the device will be knocked out of bus
2455 * master mode, and the driver will fail to recover from the reset. Fix
2456 * this by enabling bus mastering after every reset. In a non-VFLR case,
2457 * the bus master bit will not be disabled, and this call will have no
2460 if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
2461 vf->vf_reset = false;
2467 iavf_dev_uninit(struct rte_eth_dev *dev)
2469 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2472 iavf_dev_close(dev);
2478 * Reset the VF device only to re-initialize resources in the PMD layer
2481 iavf_dev_reset(struct rte_eth_dev *dev)
2485 ret = iavf_dev_uninit(dev);
2489 return iavf_dev_init(dev);
2493 iavf_dcf_cap_check_handler(__rte_unused const char *key,
2494 const char *value, __rte_unused void *opaque)
2496 if (strcmp(value, "dcf"))
2503 iavf_dcf_cap_selected(struct rte_devargs *devargs)
2505 struct rte_kvargs *kvlist;
2506 const char *key = "cap";
2509 if (devargs == NULL)
2512 kvlist = rte_kvargs_parse(devargs->args, NULL);
2516 if (!rte_kvargs_count(kvlist, key))
2519 /* dcf capability selected when there's a key-value pair: cap=dcf */
2520 if (rte_kvargs_process(kvlist, key,
2521 iavf_dcf_cap_check_handler, NULL) < 0)
2527 rte_kvargs_free(kvlist);
2531 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2532 struct rte_pci_device *pci_dev)
2534 if (iavf_dcf_cap_selected(pci_dev->device.devargs))
2537 return rte_eth_dev_pci_generic_probe(pci_dev,
2538 sizeof(struct iavf_adapter), iavf_dev_init);
2541 static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
2543 return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
2546 /* Adaptive virtual function driver struct */
2547 static struct rte_pci_driver rte_iavf_pmd = {
2548 .id_table = pci_id_iavf_map,
2549 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2550 .probe = eth_iavf_pci_probe,
2551 .remove = eth_iavf_pci_remove,
2554 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
2555 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
2556 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
2557 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
2558 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
2559 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
2560 #ifdef RTE_ETHDEV_DEBUG_RX
2561 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
2563 #ifdef RTE_ETHDEV_DEBUG_TX
2564 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);