1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
19 #include <rte_alarm.h>
20 #include <rte_atomic.h>
22 #include <rte_ether.h>
23 #include <ethdev_driver.h>
24 #include <ethdev_pci.h>
25 #include <rte_malloc.h>
26 #include <rte_memzone.h>
30 #include "iavf_rxtx.h"
31 #include "iavf_generic_flow.h"
32 #include "rte_pmd_iavf.h"
35 #define IAVF_PROTO_XTR_ARG "proto_xtr"
37 static const char * const iavf_valid_args[] = {
42 static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
43 .name = "intel_pmd_dynfield_proto_xtr_metadata",
44 .size = sizeof(uint32_t),
45 .align = __alignof__(uint32_t),
49 struct iavf_proto_xtr_ol {
50 const struct rte_mbuf_dynflag param;
55 static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
56 [IAVF_PROTO_XTR_VLAN] = {
57 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
58 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
59 [IAVF_PROTO_XTR_IPV4] = {
60 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
61 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
62 [IAVF_PROTO_XTR_IPV6] = {
63 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
64 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
65 [IAVF_PROTO_XTR_IPV6_FLOW] = {
66 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
67 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
68 [IAVF_PROTO_XTR_TCP] = {
69 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
70 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
71 [IAVF_PROTO_XTR_IP_OFFSET] = {
72 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
73 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
76 static int iavf_dev_configure(struct rte_eth_dev *dev);
77 static int iavf_dev_start(struct rte_eth_dev *dev);
78 static int iavf_dev_stop(struct rte_eth_dev *dev);
79 static int iavf_dev_close(struct rte_eth_dev *dev);
80 static int iavf_dev_reset(struct rte_eth_dev *dev);
81 static int iavf_dev_info_get(struct rte_eth_dev *dev,
82 struct rte_eth_dev_info *dev_info);
83 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
84 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
85 struct rte_eth_stats *stats);
86 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
87 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
88 struct rte_eth_xstat *xstats, unsigned int n);
89 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
90 struct rte_eth_xstat_name *xstats_names,
92 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
93 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
94 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
95 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
96 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
97 struct rte_ether_addr *addr,
100 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
101 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
102 uint16_t vlan_id, int on);
103 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
104 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
105 struct rte_eth_rss_reta_entry64 *reta_conf,
107 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
108 struct rte_eth_rss_reta_entry64 *reta_conf,
110 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
111 struct rte_eth_rss_conf *rss_conf);
112 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
113 struct rte_eth_rss_conf *rss_conf);
114 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
115 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
116 struct rte_ether_addr *mac_addr);
117 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
119 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
121 static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
122 const struct rte_flow_ops **ops);
123 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
124 struct rte_ether_addr *mc_addrs,
125 uint32_t mc_addrs_num);
126 static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
128 static const struct rte_pci_id pci_id_iavf_map[] = {
129 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
130 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
131 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
132 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
133 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
134 { .vendor_id = 0, /* sentinel */ },
137 struct rte_iavf_xstats_name_off {
138 char name[RTE_ETH_XSTATS_NAME_SIZE];
142 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
143 {"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
144 {"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
145 {"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
146 {"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
147 {"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
148 {"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
149 rx_unknown_protocol)},
150 {"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
151 {"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
152 {"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
153 {"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
154 {"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
155 {"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
158 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
159 sizeof(rte_iavf_stats_strings[0]))
161 static const struct eth_dev_ops iavf_eth_dev_ops = {
162 .dev_configure = iavf_dev_configure,
163 .dev_start = iavf_dev_start,
164 .dev_stop = iavf_dev_stop,
165 .dev_close = iavf_dev_close,
166 .dev_reset = iavf_dev_reset,
167 .dev_infos_get = iavf_dev_info_get,
168 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
169 .link_update = iavf_dev_link_update,
170 .stats_get = iavf_dev_stats_get,
171 .stats_reset = iavf_dev_stats_reset,
172 .xstats_get = iavf_dev_xstats_get,
173 .xstats_get_names = iavf_dev_xstats_get_names,
174 .xstats_reset = iavf_dev_stats_reset,
175 .promiscuous_enable = iavf_dev_promiscuous_enable,
176 .promiscuous_disable = iavf_dev_promiscuous_disable,
177 .allmulticast_enable = iavf_dev_allmulticast_enable,
178 .allmulticast_disable = iavf_dev_allmulticast_disable,
179 .mac_addr_add = iavf_dev_add_mac_addr,
180 .mac_addr_remove = iavf_dev_del_mac_addr,
181 .set_mc_addr_list = iavf_set_mc_addr_list,
182 .vlan_filter_set = iavf_dev_vlan_filter_set,
183 .vlan_offload_set = iavf_dev_vlan_offload_set,
184 .rx_queue_start = iavf_dev_rx_queue_start,
185 .rx_queue_stop = iavf_dev_rx_queue_stop,
186 .tx_queue_start = iavf_dev_tx_queue_start,
187 .tx_queue_stop = iavf_dev_tx_queue_stop,
188 .rx_queue_setup = iavf_dev_rx_queue_setup,
189 .rx_queue_release = iavf_dev_rx_queue_release,
190 .tx_queue_setup = iavf_dev_tx_queue_setup,
191 .tx_queue_release = iavf_dev_tx_queue_release,
192 .mac_addr_set = iavf_dev_set_default_mac_addr,
193 .reta_update = iavf_dev_rss_reta_update,
194 .reta_query = iavf_dev_rss_reta_query,
195 .rss_hash_update = iavf_dev_rss_hash_update,
196 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
197 .rxq_info_get = iavf_dev_rxq_info_get,
198 .txq_info_get = iavf_dev_txq_info_get,
199 .mtu_set = iavf_dev_mtu_set,
200 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
201 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
202 .flow_ops_get = iavf_dev_flow_ops_get,
203 .tx_done_cleanup = iavf_dev_tx_done_cleanup,
204 .get_monitor_addr = iavf_get_monitor_addr,
205 .tm_ops_get = iavf_tm_ops_get,
209 iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
215 *(const void **)arg = &iavf_tm_ops;
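/* Replace the whole multicast list: flush the previously programmed
 * addresses, program the new list, and on failure roll back by
 * re-adding the previous addresses.
 */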
221 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
222 struct rte_ether_addr *mc_addrs,
223 uint32_t mc_addrs_num)
225 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
226 struct iavf_adapter *adapter =
227 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
230 if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
232 "can't add more than a limited number (%u) of addresses.",
233 (uint32_t)IAVF_NUM_MACADDR_MAX);
237 /* flush previous addresses */
238 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
244 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
247 /* if adding the new MAC address list fails, restore the previous
250 ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
251 vf->mc_addrs_num, true);
255 vf->mc_addrs_num = mc_addrs_num;
257 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
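/* Translate the ETH_RSS_* hash flags into the hardware HENA bitmask:
 * bit i of HENA enables the packet classifier type (PCTYPE) whose
 * rte_eth RSS flag is map_hena_rss[i]. Only PCTYPEs advertised in the
 * capability bitmap returned by the PF are enabled.
 */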
264 iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
266 static const uint64_t map_hena_rss[] = {
268 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
269 ETH_RSS_NONFRAG_IPV4_UDP,
270 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
271 ETH_RSS_NONFRAG_IPV4_UDP,
272 [IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
273 ETH_RSS_NONFRAG_IPV4_UDP,
274 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
275 ETH_RSS_NONFRAG_IPV4_TCP,
276 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
277 ETH_RSS_NONFRAG_IPV4_TCP,
278 [IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
279 ETH_RSS_NONFRAG_IPV4_SCTP,
280 [IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
281 ETH_RSS_NONFRAG_IPV4_OTHER,
282 [IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
285 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
286 ETH_RSS_NONFRAG_IPV6_UDP,
287 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
288 ETH_RSS_NONFRAG_IPV6_UDP,
289 [IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
290 ETH_RSS_NONFRAG_IPV6_UDP,
291 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
292 ETH_RSS_NONFRAG_IPV6_TCP,
293 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
294 ETH_RSS_NONFRAG_IPV6_TCP,
295 [IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
296 ETH_RSS_NONFRAG_IPV6_SCTP,
297 [IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
298 ETH_RSS_NONFRAG_IPV6_OTHER,
299 [IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
302 [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
305 const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
306 ETH_RSS_NONFRAG_IPV4_TCP |
307 ETH_RSS_NONFRAG_IPV4_SCTP |
308 ETH_RSS_NONFRAG_IPV4_OTHER |
311 const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
312 ETH_RSS_NONFRAG_IPV6_TCP |
313 ETH_RSS_NONFRAG_IPV6_SCTP |
314 ETH_RSS_NONFRAG_IPV6_OTHER |
317 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
318 uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
322 ret = iavf_get_hena_caps(adapter, &caps);
325 * RSS offload type configuration is not a mandatory feature
326 * for a VF, so just log a warning and return.
329 "failed to get RSS offload type caps, ret: %d", ret);
334 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered
335 * generalizations of all the other IPv4 and IPv6 RSS types.
337 if (rss_hf & ETH_RSS_IPV4)
340 if (rss_hf & ETH_RSS_IPV6)
343 RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
345 for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
346 uint64_t bit = BIT_ULL(i);
348 if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
349 valid_rss_hf |= map_hena_rss[i];
354 ret = iavf_set_hena(adapter, hena);
357 * RSS offload type configuration is not a mandatory feature
358 * for a VF, so just log a warning and return.
361 "failed to set RSS offload types, ret: %d", ret);
365 if (valid_rss_hf & ipv4_rss)
366 valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
368 if (valid_rss_hf & ipv6_rss)
369 valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
371 if (rss_hf & ~valid_rss_hf)
372 PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
373 rss_hf & ~valid_rss_hf);
375 vf->rss_hf = valid_rss_hf;
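/* Initialize RSS from dev_conf: program the hash key (a random one is
 * generated if none was supplied), fill the lookup table by cycling
 * over the configured Rx queues, and push both to the PF via virtchnl.
 */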
379 iavf_init_rss(struct iavf_adapter *adapter)
381 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
382 struct rte_eth_rss_conf *rss_conf;
386 rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
387 nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
388 vf->max_rss_qregion);
390 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
391 PMD_DRV_LOG(DEBUG, "RSS is not supported");
395 /* configure RSS key */
396 if (!rss_conf->rss_key) {
397 /* Calculate the default hash key */
398 for (i = 0; i < vf->vf_res->rss_key_size; i++)
399 vf->rss_key[i] = (uint8_t)rte_rand();
401 rte_memcpy(vf->rss_key, rss_conf->rss_key,
402 RTE_MIN(rss_conf->rss_key_len,
403 vf->vf_res->rss_key_size));
405 /* init RSS LUT table */
406 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
411 /* send virtchnl ops to configure RSS */
412 ret = iavf_configure_rss_lut(adapter);
415 ret = iavf_configure_rss_key(adapter);
419 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
420 /* Set RSS hash configuration based on rss_conf->rss_hf. */
421 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
423 PMD_DRV_LOG(ERR, "failed to set default RSS");
427 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
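/* Ask the PF for a new number of queue pairs, then trigger a VF reset
 * so the reallocated resources are picked up during re-initialization.
 */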
434 iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
436 struct iavf_adapter *ad =
437 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
438 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
441 ret = iavf_request_queues(ad, num);
443 PMD_DRV_LOG(ERR, "request queues from PF failed");
446 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
447 vf->vsi_res->num_queue_pairs, num);
449 ret = iavf_dev_reset(dev);
451 PMD_DRV_LOG(ERR, "vf reset failed");
459 iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
461 struct iavf_adapter *adapter =
462 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
463 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
466 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
469 enable = !!(dev->data->dev_conf.txmode.offloads &
470 DEV_TX_OFFLOAD_VLAN_INSERT);
471 iavf_config_vlan_insert_v2(adapter, enable);
477 iavf_dev_init_vlan(struct rte_eth_dev *dev)
481 err = iavf_dev_vlan_offload_set(dev,
482 ETH_VLAN_STRIP_MASK |
483 ETH_QINQ_STRIP_MASK |
484 ETH_VLAN_FILTER_MASK |
485 ETH_VLAN_EXTEND_MASK);
487 PMD_DRV_LOG(ERR, "Failed to update VLAN offload");
491 err = iavf_dev_vlan_insert_set(dev);
493 PMD_DRV_LOG(ERR, "Failed to update VLAN insertion");
499 iavf_dev_configure(struct rte_eth_dev *dev)
501 struct iavf_adapter *ad =
502 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
503 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
504 uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
505 dev->data->nb_tx_queues);
508 ad->rx_bulk_alloc_allowed = true;
509 /* Initialize to TRUE. If any Rx queue doesn't meet the
510 * vector Rx/Tx preconditions, it will be reset.
512 ad->rx_vec_allowed = true;
513 ad->tx_vec_allowed = true;
515 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
516 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
518 /* Large VF setting */
519 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
520 if (!(vf->vf_res->vf_cap_flags &
521 VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
522 PMD_DRV_LOG(ERR, "large VF is not supported");
526 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
527 PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
528 IAVF_MAX_NUM_QUEUES_LV);
532 ret = iavf_queues_req_reset(dev, num_queue_pairs);
536 ret = iavf_get_max_rss_queue_region(ad);
538 PMD_INIT_LOG(ERR, "get max rss queue region failed");
542 vf->lv_enabled = true;
544 /* If large VF is already enabled, disable it and release the
545 * redundant queue resources. Otherwise, check whether there are
546 * enough queue pairs; if not, request them from the PF.
548 if (vf->lv_enabled ||
549 num_queue_pairs > vf->vsi_res->num_queue_pairs) {
550 ret = iavf_queues_req_reset(dev, num_queue_pairs);
554 vf->lv_enabled = false;
556 /* if large VF is not required, use the default RSS queue region */
557 vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
560 ret = iavf_dev_init_vlan(dev);
562 PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
564 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
565 if (iavf_init_rss(ad) != 0) {
566 PMD_DRV_LOG(ERR, "configure RSS failed");
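/* Derive the Rx queue's maximum packet length from the configured
 * max_rx_pkt_len, capped by what chained Rx buffers can hold, and
 * enable scattered Rx when a frame may not fit into a single mbuf.
 */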
574 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
576 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
577 struct rte_eth_dev_data *dev_data = dev->data;
578 uint16_t buf_size, max_pkt_len;
580 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
582 /* Calculate the maximum packet length allowed */
583 max_pkt_len = RTE_MIN((uint32_t)
584 rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
585 dev->data->dev_conf.rxmode.max_rx_pkt_len);
587 /* Check if the jumbo frame and maximum packet length are set
590 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
591 if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
592 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
593 PMD_DRV_LOG(ERR, "maximum packet length must be "
594 "larger than %u and smaller than %u, "
595 "as jumbo frame is enabled",
596 (uint32_t)IAVF_ETH_MAX_LEN,
597 (uint32_t)IAVF_FRAME_SIZE_MAX);
601 if (max_pkt_len < RTE_ETHER_MIN_LEN ||
602 max_pkt_len > IAVF_ETH_MAX_LEN) {
603 PMD_DRV_LOG(ERR, "maximum packet length must be "
604 "larger than %u and smaller than %u, "
605 "as jumbo frame is disabled",
606 (uint32_t)RTE_ETHER_MIN_LEN,
607 (uint32_t)IAVF_ETH_MAX_LEN);
612 rxq->max_pkt_len = max_pkt_len;
613 if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
614 rxq->max_pkt_len > buf_size) {
615 dev_data->scattered_rx = 1;
617 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
618 IAVF_WRITE_FLUSH(hw);
624 iavf_init_queues(struct rte_eth_dev *dev)
626 struct iavf_rx_queue **rxq =
627 (struct iavf_rx_queue **)dev->data->rx_queues;
628 int i, ret = IAVF_SUCCESS;
630 for (i = 0; i < dev->data->nb_rx_queues; i++) {
631 if (!rxq[i] || !rxq[i]->q_set)
633 ret = iavf_init_rxq(dev, rxq[i]);
634 if (ret != IAVF_SUCCESS)
637 /* set Rx/Tx functions to vector/scatter/single-segment
638 * mode according to the queue parameters
640 iavf_set_rx_function(dev);
641 iavf_set_tx_function(dev);
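/* Map Rx queues to MSI-X vectors. Three cases are handled below:
 * 1) Rx interrupts unused: map everything to a single vector,
 *    preferring write-back on ITR (WB_ON_ITR) when the PF offers it;
 * 2) no extra vectors available: share the misc vector with all queues;
 * 3) otherwise distribute the queues round-robin over the Rx vectors
 *    starting at IAVF_RX_VEC_START.
 */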
646 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
647 struct rte_intr_handle *intr_handle)
649 struct iavf_adapter *adapter =
650 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
651 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
652 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
653 struct iavf_qv_map *qv_map;
654 uint16_t interval, i;
657 if (rte_intr_cap_multiple(intr_handle) &&
658 dev->data->dev_conf.intr_conf.rxq) {
659 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
663 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
664 intr_handle->intr_vec =
665 rte_zmalloc("intr_vec",
666 dev->data->nb_rx_queues * sizeof(int), 0);
667 if (!intr_handle->intr_vec) {
668 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
669 dev->data->nb_rx_queues);
674 qv_map = rte_zmalloc("qv_map",
675 dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
677 PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
678 dev->data->nb_rx_queues);
682 if (!dev->data->dev_conf.intr_conf.rxq ||
683 !rte_intr_dp_is_en(intr_handle)) {
684 /* Rx interrupts disabled; map a single vector for write-back only */
686 if (vf->vf_res->vf_cap_flags &
687 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
688 /* If WB_ON_ITR is supported, enable it */
689 vf->msix_base = IAVF_RX_VEC_START;
690 /* Set the ITR for index zero to 2us, to make sure that
691 * we leave time for aggregation to occur but don't
692 * increase latency dramatically.
695 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
696 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
697 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
698 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
699 /* debug - check for success: the value read back
700 * should be 2, register offset is 0x2800
702 /* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
704 /* If the WB_ON_ITR offload flag is not set, an
705 * interrupt is needed for descriptor write-back.
707 vf->msix_base = IAVF_MISC_VEC_ID;
709 /* set ITR to default */
710 interval = iavf_calc_itr_interval(
711 IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
712 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
713 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
714 (IAVF_ITR_INDEX_DEFAULT <<
715 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
717 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
719 IAVF_WRITE_FLUSH(hw);
720 /* map all queues to the same interrupt */
721 for (i = 0; i < dev->data->nb_rx_queues; i++) {
722 qv_map[i].queue_id = i;
723 qv_map[i].vector_id = vf->msix_base;
727 if (!rte_intr_allow_others(intr_handle)) {
729 vf->msix_base = IAVF_MISC_VEC_ID;
730 for (i = 0; i < dev->data->nb_rx_queues; i++) {
731 qv_map[i].queue_id = i;
732 qv_map[i].vector_id = vf->msix_base;
733 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
737 "vector %u are mapping to all Rx queues",
740 /* If Rx interrupts are required and multiple
741 * interrupts can be used, vectors start from 1
743 vf->nb_msix = RTE_MIN(intr_handle->nb_efd,
744 (uint16_t)(vf->vf_res->max_vectors - 1));
745 vf->msix_base = IAVF_RX_VEC_START;
746 vec = IAVF_RX_VEC_START;
747 for (i = 0; i < dev->data->nb_rx_queues; i++) {
748 qv_map[i].queue_id = i;
749 qv_map[i].vector_id = vec;
750 intr_handle->intr_vec[i] = vec++;
751 if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
752 vec = IAVF_RX_VEC_START;
756 "%u vectors are mapping to %u Rx queues",
757 vf->nb_msix, dev->data->nb_rx_queues);
761 if (!vf->lv_enabled) {
762 if (iavf_config_irq_map(adapter)) {
763 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
767 uint16_t num_qv_maps = dev->data->nb_rx_queues;
770 while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
771 if (iavf_config_irq_map_lv(adapter,
772 IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
773 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
776 num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
777 index += IAVF_IRQ_MAP_NUM_PER_BUF;
780 if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
781 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
789 iavf_start_queues(struct rte_eth_dev *dev)
791 struct iavf_rx_queue *rxq;
792 struct iavf_tx_queue *txq;
795 for (i = 0; i < dev->data->nb_tx_queues; i++) {
796 txq = dev->data->tx_queues[i];
797 if (txq->tx_deferred_start)
799 if (iavf_dev_tx_queue_start(dev, i) != 0) {
800 PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
805 for (i = 0; i < dev->data->nb_rx_queues; i++) {
806 rxq = dev->data->rx_queues[i];
807 if (rxq->rx_deferred_start)
809 if (iavf_dev_rx_queue_start(dev, i) != 0) {
810 PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
819 iavf_dev_start(struct rte_eth_dev *dev)
821 struct iavf_adapter *adapter =
822 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
823 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
824 struct rte_intr_handle *intr_handle = dev->intr_handle;
825 uint16_t num_queue_pairs;
828 PMD_INIT_FUNC_TRACE();
830 adapter->stopped = 0;
832 vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
833 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
834 dev->data->nb_tx_queues);
835 num_queue_pairs = vf->num_queue_pairs;
837 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
838 if (iavf_get_qos_cap(adapter)) {
839 PMD_INIT_LOG(ERR, "Failed to get qos capability");
843 if (iavf_init_queues(dev) != 0) {
844 PMD_DRV_LOG(ERR, "queue initialization failed");
848 /* If needed, send the configure-queues message multiple times
849 * to keep each adminq buffer below the 4K limit.
851 while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
852 if (iavf_configure_queues(adapter,
853 IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
854 PMD_DRV_LOG(ERR, "configure queues failed");
857 num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
858 index += IAVF_CFG_Q_NUM_PER_BUF;
861 if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
862 PMD_DRV_LOG(ERR, "configure queues failed");
866 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
867 PMD_DRV_LOG(ERR, "configure irq failed");
870 /* re-enable interrupts, because the efd assignment may have changed */
871 if (dev->data->dev_conf.intr_conf.rxq != 0) {
872 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
873 rte_intr_disable(intr_handle);
874 rte_intr_enable(intr_handle);
877 /* Set all MAC addresses */
878 iavf_add_del_all_mac_addr(adapter, true);
880 /* Set all multicast addresses */
881 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
884 if (iavf_start_queues(dev) != 0) {
885 PMD_DRV_LOG(ERR, "enable queues failed");
892 iavf_add_del_all_mac_addr(adapter, false);
898 iavf_dev_stop(struct rte_eth_dev *dev)
900 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
901 struct iavf_adapter *adapter =
902 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
903 struct rte_intr_handle *intr_handle = dev->intr_handle;
905 PMD_INIT_FUNC_TRACE();
907 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
908 dev->data->dev_conf.intr_conf.rxq != 0)
909 rte_intr_disable(intr_handle);
911 if (adapter->stopped == 1)
914 iavf_stop_queues(dev);
916 /* Disable the interrupt for Rx */
917 rte_intr_efd_disable(intr_handle);
918 /* Rx interrupt vector mapping free */
919 if (intr_handle->intr_vec) {
920 rte_free(intr_handle->intr_vec);
921 intr_handle->intr_vec = NULL;
924 /* remove all MAC addresses */
925 iavf_add_del_all_mac_addr(adapter, false);
927 /* remove all multicast addresses */
928 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
931 adapter->stopped = 1;
932 dev->data->dev_started = 0;
938 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
940 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
942 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
943 dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
944 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
945 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
946 dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
947 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
948 dev_info->hash_key_size = vf->vf_res->rss_key_size;
949 dev_info->reta_size = vf->vf_res->rss_lut_size;
950 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
951 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
952 dev_info->rx_offload_capa =
953 DEV_RX_OFFLOAD_VLAN_STRIP |
954 DEV_RX_OFFLOAD_QINQ_STRIP |
955 DEV_RX_OFFLOAD_IPV4_CKSUM |
956 DEV_RX_OFFLOAD_UDP_CKSUM |
957 DEV_RX_OFFLOAD_TCP_CKSUM |
958 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
959 DEV_RX_OFFLOAD_SCATTER |
960 DEV_RX_OFFLOAD_JUMBO_FRAME |
961 DEV_RX_OFFLOAD_VLAN_FILTER |
962 DEV_RX_OFFLOAD_RSS_HASH;
964 dev_info->tx_offload_capa =
965 DEV_TX_OFFLOAD_VLAN_INSERT |
966 DEV_TX_OFFLOAD_QINQ_INSERT |
967 DEV_TX_OFFLOAD_IPV4_CKSUM |
968 DEV_TX_OFFLOAD_UDP_CKSUM |
969 DEV_TX_OFFLOAD_TCP_CKSUM |
970 DEV_TX_OFFLOAD_SCTP_CKSUM |
971 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
972 DEV_TX_OFFLOAD_TCP_TSO |
973 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
974 DEV_TX_OFFLOAD_GRE_TNL_TSO |
975 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
976 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
977 DEV_TX_OFFLOAD_MULTI_SEGS |
978 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
980 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
981 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
983 dev_info->default_rxconf = (struct rte_eth_rxconf) {
984 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
989 dev_info->default_txconf = (struct rte_eth_txconf) {
990 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
991 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
995 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
996 .nb_max = IAVF_MAX_RING_DESC,
997 .nb_min = IAVF_MIN_RING_DESC,
998 .nb_align = IAVF_ALIGN_RING_DESC,
1001 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1002 .nb_max = IAVF_MAX_RING_DESC,
1003 .nb_min = IAVF_MIN_RING_DESC,
1004 .nb_align = IAVF_ALIGN_RING_DESC,
1010 static const uint32_t *
1011 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1013 static const uint32_t ptypes[] = {
1015 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1018 RTE_PTYPE_L4_NONFRAG,
1028 iavf_dev_link_update(struct rte_eth_dev *dev,
1029 __rte_unused int wait_to_complete)
1031 struct rte_eth_link new_link;
1032 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1034 memset(&new_link, 0, sizeof(new_link));
1036 /* Only read the link status stored in the VF; the info is updated
1037 * when a LINK_CHANGE event is received from the PF via virtchnl.
1039 switch (vf->link_speed) {
1041 new_link.link_speed = ETH_SPEED_NUM_10M;
1044 new_link.link_speed = ETH_SPEED_NUM_100M;
1047 new_link.link_speed = ETH_SPEED_NUM_1G;
1050 new_link.link_speed = ETH_SPEED_NUM_10G;
1053 new_link.link_speed = ETH_SPEED_NUM_20G;
1056 new_link.link_speed = ETH_SPEED_NUM_25G;
1059 new_link.link_speed = ETH_SPEED_NUM_40G;
1062 new_link.link_speed = ETH_SPEED_NUM_50G;
1065 new_link.link_speed = ETH_SPEED_NUM_100G;
1068 new_link.link_speed = ETH_SPEED_NUM_NONE;
1072 new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1073 new_link.link_status = vf->link_up ? ETH_LINK_UP :
1075 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1076 ETH_LINK_SPEED_FIXED);
1078 return rte_eth_linkstatus_set(dev, &new_link);
1082 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1084 struct iavf_adapter *adapter =
1085 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1086 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1088 return iavf_config_promisc(adapter,
1089 true, vf->promisc_multicast_enabled);
1093 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1095 struct iavf_adapter *adapter =
1096 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1097 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1099 return iavf_config_promisc(adapter,
1100 false, vf->promisc_multicast_enabled);
1104 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1106 struct iavf_adapter *adapter =
1107 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1108 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1110 return iavf_config_promisc(adapter,
1111 vf->promisc_unicast_enabled, true);
1115 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1117 struct iavf_adapter *adapter =
1118 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1119 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1121 return iavf_config_promisc(adapter,
1122 vf->promisc_unicast_enabled, false);
1126 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
1127 __rte_unused uint32_t index,
1128 __rte_unused uint32_t pool)
1130 struct iavf_adapter *adapter =
1131 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1132 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1135 if (rte_is_zero_ether_addr(addr)) {
1136 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1140 err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
1142 PMD_DRV_LOG(ERR, "fail to add MAC address");
1152 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1154 struct iavf_adapter *adapter =
1155 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1156 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1157 struct rte_ether_addr *addr;
1160 addr = &dev->data->mac_addrs[index];
1162 err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
1164 PMD_DRV_LOG(ERR, "fail to delete MAC address");
1170 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1172 struct iavf_adapter *adapter =
1173 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1174 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1177 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1178 err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
1184 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1187 err = iavf_add_del_vlan(adapter, vlan_id, on);
1194 iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1196 struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1197 struct iavf_adapter *adapter =
1198 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1202 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1203 if (vfc->ids[i] == 0)
1207 for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1209 iavf_add_del_vlan_v2(adapter,
1210 64 * i + j, enable);
1216 iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1218 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1219 struct iavf_adapter *adapter =
1220 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1224 if (mask & ETH_VLAN_FILTER_MASK) {
1225 enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
1227 iavf_iterate_vlan_filters_v2(dev, enable);
1230 if (mask & ETH_VLAN_STRIP_MASK) {
1231 enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1233 err = iavf_config_vlan_strip_v2(adapter, enable);
1234 /* If not supported, stripping is already disabled by the PF */
1235 if (err == -ENOTSUP && !enable)
1245 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1247 struct iavf_adapter *adapter =
1248 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1249 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1250 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1253 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1254 return iavf_dev_vlan_offload_set_v2(dev, mask);
1256 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1259 /* VLAN stripping setting */
1260 if (mask & ETH_VLAN_STRIP_MASK) {
1261 /* Enable or disable VLAN stripping */
1262 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1263 err = iavf_enable_vlan_strip(adapter);
1265 err = iavf_disable_vlan_strip(adapter);
1274 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
1275 struct rte_eth_rss_reta_entry64 *reta_conf,
1278 struct iavf_adapter *adapter =
1279 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1280 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1282 uint16_t i, idx, shift;
1285 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1288 if (reta_size != vf->vf_res->rss_lut_size) {
1289 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
1290 "(%d) doesn't match the size the hardware can "
1291 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1295 lut = rte_zmalloc("rss_lut", reta_size, 0);
1297 PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS LUT");
1300 /* store the old LUT temporarily */
1301 rte_memcpy(lut, vf->rss_lut, reta_size);
1303 for (i = 0; i < reta_size; i++) {
1304 idx = i / RTE_RETA_GROUP_SIZE;
1305 shift = i % RTE_RETA_GROUP_SIZE;
1306 if (reta_conf[idx].mask & (1ULL << shift))
1307 vf->rss_lut[i] = reta_conf[idx].reta[shift];
1311 /* send virtchnl ops to configure RSS */
1312 ret = iavf_configure_rss_lut(adapter);
1313 if (ret) /* revert to the old LUT saved above */
1314 rte_memcpy(vf->rss_lut, lut, reta_size);
1321 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
1322 struct rte_eth_rss_reta_entry64 *reta_conf,
1325 struct iavf_adapter *adapter =
1326 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1327 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1328 uint16_t i, idx, shift;
1330 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1333 if (reta_size != vf->vf_res->rss_lut_size) {
1334 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
1335 "(%d) doesn't match the size the hardware can "
1336 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1340 for (i = 0; i < reta_size; i++) {
1341 idx = i / RTE_RETA_GROUP_SIZE;
1342 shift = i % RTE_RETA_GROUP_SIZE;
1343 if (reta_conf[idx].mask & (1ULL << shift))
1344 reta_conf[idx].reta[shift] = vf->rss_lut[i];
1351 iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
1353 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1355 /* no key supplied: keep the current RSS key unchanged */
1356 if (!key || key_len == 0) {
1357 PMD_DRV_LOG(DEBUG, "No key to be configured");
1359 } else if (key_len != vf->vf_res->rss_key_size) {
1360 PMD_DRV_LOG(ERR, "The size of the hash key configured "
1361 "(%d) doesn't match the size the hardware can "
1362 "support (%d)", key_len,
1363 vf->vf_res->rss_key_size);
1367 rte_memcpy(vf->rss_key, key, key_len);
1369 return iavf_configure_rss_key(adapter);
1373 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
1374 struct rte_eth_rss_conf *rss_conf)
1376 struct iavf_adapter *adapter =
1377 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1378 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1381 adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
1383 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1387 ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
1388 rss_conf->rss_key_len);
1392 if (rss_conf->rss_hf == 0) {
1394 ret = iavf_set_hena(adapter, 0);
1396 /* This is a workaround: temporarily allow the error, since
1397 * the PF may not handle hena = 0.
1400 PMD_DRV_LOG(WARNING, "failed to clear existing RSS, PF support may be lacking");
1404 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
1405 /* Clear existing RSS. */
1406 ret = iavf_set_hena(adapter, 0);
1408 /* This is a workaround: temporarily allow the error, since
1409 * the PF may not handle hena = 0.
1412 PMD_DRV_LOG(WARNING, "failed to clear existing RSS,"
1415 /* Set new RSS configuration. */
1416 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
1418 PMD_DRV_LOG(ERR, "failed to set new RSS");
1422 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
1429 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1430 struct rte_eth_rss_conf *rss_conf)
1432 struct iavf_adapter *adapter =
1433 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1434 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1436 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1439 rss_conf->rss_hf = vf->rss_hf;
1441 if (!rss_conf->rss_key)
1444 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
1445 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
1451 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1453 uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
1456 if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
1459 /* MTU setting is forbidden while the port is running */
1460 if (dev->data->dev_started) {
1461 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
1465 if (frame_size > IAVF_ETH_MAX_LEN)
1466 dev->data->dev_conf.rxmode.offloads |=
1467 DEV_RX_OFFLOAD_JUMBO_FRAME;
1469 dev->data->dev_conf.rxmode.offloads &=
1470 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1472 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1478 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1479 struct rte_ether_addr *mac_addr)
1481 struct iavf_adapter *adapter =
1482 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1483 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1484 struct rte_ether_addr *old_addr;
1487 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1489 if (rte_is_same_ether_addr(old_addr, mac_addr))
1492 ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
1494 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1495 RTE_ETHER_ADDR_PRT_FMT,
1496 RTE_ETHER_ADDR_BYTES(old_addr));
1498 ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
1500 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1501 RTE_ETHER_ADDR_PRT_FMT,
1502 RTE_ETHER_ADDR_BYTES(mac_addr));
1507 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
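/* Hardware counters are 48 bits wide and wrap around; statistics are
 * reported relative to a stored offset. When the raw value is below
 * the offset, a wrap occurred and 2^48 is added before subtracting.
 * E.g. offset 0xFFFFFFFFFFF0 and raw value 0x10 yield
 * 0x10 + 2^48 - 0xFFFFFFFFFFF0 = 0x20 units since the snapshot.
 */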
1512 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1514 if (*stat >= *offset)
1515 *stat = *stat - *offset;
1517 *stat = (uint64_t)((*stat +
1518 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1520 *stat &= IAVF_48_BIT_MASK;
1524 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1526 if (*stat >= *offset)
1527 *stat = (uint64_t)(*stat - *offset);
1529 *stat = (uint64_t)((*stat +
1530 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
1534 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1536 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
1538 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1539 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1540 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1541 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1542 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1543 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1544 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1545 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1546 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1547 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1548 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
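/* Statistics are queried from the PF as raw counters;
 * iavf_update_stats() converts them to values relative to the offsets
 * snapshotted at the last stats reset. When the KEEP_CRC offload is
 * off, the 4-byte CRC counted by hardware is subtracted from the Rx
 * byte count.
 */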
1552 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1554 struct iavf_adapter *adapter =
1555 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1556 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1557 struct iavf_vsi *vsi = &vf->vsi;
1558 struct virtchnl_eth_stats *pstats = NULL;
1561 ret = iavf_query_stats(adapter, &pstats);
1563 uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
1564 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
1566 iavf_update_stats(vsi, pstats);
1567 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1568 pstats->rx_broadcast - pstats->rx_discards;
1569 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1571 stats->imissed = pstats->rx_discards;
1572 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1573 stats->ibytes = pstats->rx_bytes;
1574 stats->ibytes -= stats->ipackets * crc_stats_len;
1575 stats->obytes = pstats->tx_bytes;
1577 PMD_DRV_LOG(ERR, "Get statistics failed");
1583 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1586 struct iavf_adapter *adapter =
1587 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1588 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1589 struct iavf_vsi *vsi = &vf->vsi;
1590 struct virtchnl_eth_stats *pstats = NULL;
1592 /* read stat values to clear hardware registers */
1593 ret = iavf_query_stats(adapter, &pstats);
1597 /* set the stats offset based on the current values */
1598 vsi->eth_stats_offset = *pstats;
1603 static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1604 struct rte_eth_xstat_name *xstats_names,
1605 __rte_unused unsigned int limit)
1609 if (xstats_names != NULL)
1610 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1611 snprintf(xstats_names[i].name,
1612 sizeof(xstats_names[i].name),
1613 "%s", rte_iavf_stats_strings[i].name);
1615 return IAVF_NB_XSTATS;
1618 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
1619 struct rte_eth_xstat *xstats, unsigned int n)
1623 struct iavf_adapter *adapter =
1624 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1625 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1626 struct iavf_vsi *vsi = &vf->vsi;
1627 struct virtchnl_eth_stats *pstats = NULL;
1629 if (n < IAVF_NB_XSTATS)
1630 return IAVF_NB_XSTATS;
1632 ret = iavf_query_stats(adapter, &pstats);
1639 iavf_update_stats(vsi, pstats);
1641 /* loop over xstats array and values from pstats */
1642 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1644 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1645 rte_iavf_stats_strings[i].offset);
1648 return IAVF_NB_XSTATS;
1653 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1655 struct iavf_adapter *adapter =
1656 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1657 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1658 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1659 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1662 msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
1663 if (msix_intr == IAVF_MISC_VEC_ID) {
1664 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1665 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1666 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1667 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1668 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1671 IAVF_VFINT_DYN_CTLN1
1672 (msix_intr - IAVF_RX_VEC_START),
1673 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1674 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1675 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1678 IAVF_WRITE_FLUSH(hw);
1680 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1681 rte_intr_ack(&pci_dev->intr_handle);
1687 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1689 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1690 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1693 msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
1694 if (msix_intr == IAVF_MISC_VEC_ID) {
1695 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1700 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1703 IAVF_WRITE_FLUSH(hw);
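/* Poll the VFGEN_RSTAT register until the PF reports the VF as active
 * or the reset as completed, giving up after IAVF_RESET_WAIT_CNT
 * iterations.
 */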
1708 iavf_check_vf_reset_done(struct iavf_hw *hw)
1712 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1713 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1714 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1715 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1716 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1717 reset == VIRTCHNL_VFR_COMPLETED)
1722 if (i >= IAVF_RESET_WAIT_CNT)
1729 iavf_lookup_proto_xtr_type(const char *flex_name)
1733 enum iavf_proto_xtr_type type;
1734 } xtr_type_map[] = {
1735 { "vlan", IAVF_PROTO_XTR_VLAN },
1736 { "ipv4", IAVF_PROTO_XTR_IPV4 },
1737 { "ipv6", IAVF_PROTO_XTR_IPV6 },
1738 { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
1739 { "tcp", IAVF_PROTO_XTR_TCP },
1740 { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
1744 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
1745 if (strcmp(flex_name, xtr_type_map[i].name) == 0)
1746 return xtr_type_map[i].type;
1749 PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
1750 "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
1756 * Parse an element; it can be a single number, a range, or a '( )' group:
1757 * 1) A single number element is just a digit, e.g. 9
1758 * 2) A single range element is two digits separated by '-', e.g. 2-6
1759 * 3) A group element combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
1760 * Within a group element, '-' is the range separator and
1761 * ',' separates single numbers.
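 * An illustrative devargs value combining these forms (the queue IDs
 * here are hypothetical):
 *   proto_xtr='[(0,2-4):ipv6,6:tcp]'
 * i.e. extract ipv6 metadata on queues 0 and 2-4, and tcp on queue 6.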
1764 iavf_parse_queue_set(const char *input, int xtr_type,
1765 struct iavf_devargs *devargs)
1767 const char *str = input;
1772 while (isblank(*str))
1775 if (!isdigit(*str) && *str != '(')
1778 /* process single number or single range of number */
1781 idx = strtoul(str, &end, 10);
1782 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1785 while (isblank(*end))
1791 /* process single <number>-<number> */
1794 while (isblank(*end))
1800 idx = strtoul(end, &end, 10);
1801 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1805 while (isblank(*end))
1812 for (idx = RTE_MIN(min, max);
1813 idx <= RTE_MAX(min, max); idx++)
1814 devargs->proto_xtr[idx] = xtr_type;
1819 /* process set within bracket */
1821 while (isblank(*str))
1826 min = IAVF_MAX_QUEUE_NUM;
1828 /* advance to the first digit */
1829 while (isblank(*str))
1834 /* get the digit value */
1836 idx = strtoul(str, &end, 10);
1837 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1840 /* advance to a separator: '-', ',' or ')' */
1841 while (isblank(*end))
1844 if (min == IAVF_MAX_QUEUE_NUM)
1846 else /* reject consecutive '-' */
1848 } else if (*end == ',' || *end == ')') {
1850 if (min == IAVF_MAX_QUEUE_NUM)
1853 for (idx = RTE_MIN(min, max);
1854 idx <= RTE_MAX(min, max); idx++)
1855 devargs->proto_xtr[idx] = xtr_type;
1857 min = IAVF_MAX_QUEUE_NUM;
1863 } while (*end != ')' && *end != '\0');
1869 iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
1871 const char *queue_start;
1876 while (isblank(*queues))
1879 if (*queues != '[') {
1880 xtr_type = iavf_lookup_proto_xtr_type(queues);
1884 devargs->proto_xtr_dflt = xtr_type;
1891 while (isblank(*queues))
1893 if (*queues == '\0')
1896 queue_start = queues;
1898 /* skip over a complete parenthesized group */
1899 if (*queue_start == '(') {
1900 queues += strcspn(queues, ")");
1905 /* scan the separator ':' */
1906 queues += strcspn(queues, ":");
1907 if (*queues++ != ':')
1909 while (isblank(*queues))
1912 for (idx = 0; ; idx++) {
1913 if (isblank(queues[idx]) ||
1914 queues[idx] == ',' ||
1915 queues[idx] == ']' ||
1916 queues[idx] == '\0')
1919 if (idx > sizeof(flex_name) - 2)
1922 flex_name[idx] = queues[idx];
1924 flex_name[idx] = '\0';
1925 xtr_type = iavf_lookup_proto_xtr_type(flex_name);
1931 while (isblank(*queues) || *queues == ',' || *queues == ']')
1934 if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
1936 } while (*queues != '\0');
1942 iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
1945 struct iavf_devargs *devargs = extra_args;
1947 if (!value || !extra_args)
1950 if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
1951 PMD_DRV_LOG(ERR, "invalid proto_xtr parameter: '%s'",
1959 static int iavf_parse_devargs(struct rte_eth_dev *dev)
1961 struct iavf_adapter *ad =
1962 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1963 struct rte_devargs *devargs = dev->device->devargs;
1964 struct rte_kvargs *kvlist;
1970 kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
1972 PMD_INIT_LOG(ERR, "invalid kvargs key");
1976 ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
1977 memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
1978 sizeof(ad->devargs.proto_xtr));
1980 ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
1981 &iavf_handle_proto_xtr_arg, &ad->devargs);
1986 rte_kvargs_free(kvlist);
1991 iavf_init_proto_xtr(struct rte_eth_dev *dev)
1993 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1994 struct iavf_adapter *ad =
1995 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1996 const struct iavf_proto_xtr_ol *xtr_ol;
1997 bool proto_xtr_enable = false;
2001 vf->proto_xtr = rte_zmalloc("vf proto xtr",
2002 vf->vsi_res->num_queue_pairs, 0);
2003 if (unlikely(!(vf->proto_xtr))) {
2004 PMD_DRV_LOG(ERR, "failed to allocate memory for the proto_xtr table");
2008 for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
2009 vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
2010 IAVF_PROTO_XTR_NONE ?
2011 ad->devargs.proto_xtr[i] :
2012 ad->devargs.proto_xtr_dflt;
2014 if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
2015 uint8_t type = vf->proto_xtr[i];
2017 iavf_proto_xtr_params[type].required = true;
2018 proto_xtr_enable = true;
2022 if (likely(!proto_xtr_enable))
2025 offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
2026 if (unlikely(offset == -1)) {
2028 "failed to extract protocol metadata, error %d",
2034 "proto_xtr metadata offset in mbuf is : %d",
2036 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;
2038 for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
2039 xtr_ol = &iavf_proto_xtr_params[i];
2041 uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);
2043 if (!xtr_ol->required)
2046 if (!(vf->supported_rxdid & BIT(rxdid))) {
2048 "rxdid[%u] is not supported in hardware",
2050 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2054 offset = rte_mbuf_dynflag_register(&xtr_ol->param);
2055 if (unlikely(offset == -1)) {
2057 "failed to register proto_xtr offload '%s', error %d",
2058 xtr_ol->param.name, -rte_errno);
2060 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2065 "proto_xtr offload '%s' offset in mbuf is : %d",
2066 xtr_ol->param.name, offset);
2067 *xtr_ol->ol_flag = 1ULL << offset;
2072 iavf_init_vf(struct rte_eth_dev *dev)
2075 struct iavf_adapter *adapter =
2076 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2077 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2078 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2080 err = iavf_parse_devargs(dev);
2082 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2086 err = iavf_set_mac_type(hw);
2088 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
2092 err = iavf_check_vf_reset_done(hw);
2094 PMD_INIT_LOG(ERR, "VF is still resetting");
2098 iavf_init_adminq_parameter(hw);
2099 err = iavf_init_adminq(hw);
2101 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
2105 vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
2107 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
2110 if (iavf_check_api_version(adapter) != 0) {
2111 PMD_INIT_LOG(ERR, "check_api version failed");
2115 bufsz = sizeof(struct virtchnl_vf_resource) +
2116 (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
2117 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
2119 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
2123 if (iavf_get_vf_resource(adapter) != 0) {
2124 PMD_INIT_LOG(ERR, "iavf_get_vf_resource failed");
2127 /* Allocate memory for RSS info */
2128 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2129 vf->rss_key = rte_zmalloc("rss_key",
2130 vf->vf_res->rss_key_size, 0);
2132 PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
2135 vf->rss_lut = rte_zmalloc("rss_lut",
2136 vf->vf_res->rss_lut_size, 0);
2138 PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
2143 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2144 if (iavf_get_supported_rxdid(adapter) != 0) {
2145 PMD_INIT_LOG(ERR, "failed to get supported RXDIDs");
2150 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
2151 if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
2152 PMD_INIT_LOG(ERR, "failed to get VLAN offload v2 capabilities");
2157 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
2158 bufsz = sizeof(struct virtchnl_qos_cap_list) +
2159 IAVF_MAX_TRAFFIC_CLASS *
2160 sizeof(struct virtchnl_qos_cap_elem);
2161 vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
2163 PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
2166 iavf_tm_conf_init(dev);
2169 iavf_init_proto_xtr(dev);
2173 rte_free(vf->rss_key);
2174 rte_free(vf->rss_lut);
2176 rte_free(vf->qos_cap);
2177 rte_free(vf->vf_res);
2180 rte_free(vf->aq_resp);
2182 iavf_shutdown_adminq(hw);
2188 iavf_uninit_vf(struct rte_eth_dev *dev)
2190 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2191 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2193 iavf_shutdown_adminq(hw);
2195 rte_free(vf->vf_res);
2199 rte_free(vf->aq_resp);
2202 rte_free(vf->qos_cap);
2205 rte_free(vf->rss_lut);
2207 rte_free(vf->rss_key);
2211 /* Enable default admin queue interrupt setting */
2213 iavf_enable_irq0(struct iavf_hw *hw)
2215 /* Enable admin queue interrupt trigger */
2216 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
2217 IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
2219 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2220 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
2221 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
2222 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2224 IAVF_WRITE_FLUSH(hw);
2228 iavf_disable_irq0(struct iavf_hw *hw)
2230 /* Disable all interrupt types */
2231 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
2232 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2233 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2234 IAVF_WRITE_FLUSH(hw);
2238 iavf_dev_interrupt_handler(void *param)
2240 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2241 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2243 iavf_disable_irq0(hw);
2245 iavf_handle_virtchnl_msg(dev);
2247 iavf_enable_irq0(hw);
2251 iavf_dev_alarm_handler(void *param)
2253 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2254 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2257 iavf_disable_irq0(hw);
2259 /* read out interrupt causes */
2260 icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
2262 if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
2263 PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
2264 iavf_handle_virtchnl_msg(dev);
2267 iavf_enable_irq0(hw);
2269 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2270 iavf_dev_alarm_handler, dev);
2274 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
2275 const struct rte_flow_ops **ops)
2280 *ops = &iavf_flow_ops;
2285 iavf_default_rss_disable(struct iavf_adapter *adapter)
2287 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2290 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2291 /* Set hena = 0 to ask PF to cleanup all existing RSS. */
2292 ret = iavf_set_hena(adapter, 0);
2294 /* This is a workaround: temporarily allow the error to be
2295 * returned, since the PF may not handle hena = 0.
2298 PMD_INIT_LOG(WARNING, "failed to disable default RSS,"
2304 iavf_dev_init(struct rte_eth_dev *eth_dev)
2306 struct iavf_adapter *adapter =
2307 IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2308 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
2309 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2310 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2313 PMD_INIT_FUNC_TRACE();
2315 /* assign ops func pointer */
2316 eth_dev->dev_ops = &iavf_eth_dev_ops;
2317 eth_dev->rx_queue_count = iavf_dev_rxq_count;
2318 eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
2319 eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
2320 eth_dev->rx_pkt_burst = &iavf_recv_pkts;
2321 eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
2322 eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
2324 /* For secondary processes, we don't initialise any further as primary
2325 * has already done this work. Only check if we need a different RX
2328 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2329 iavf_set_rx_function(eth_dev);
2330 iavf_set_tx_function(eth_dev);
2333 rte_eth_copy_pci_info(eth_dev, pci_dev);
2334 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2336 hw->vendor_id = pci_dev->id.vendor_id;
2337 hw->device_id = pci_dev->id.device_id;
2338 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2339 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2340 hw->bus.bus_id = pci_dev->addr.bus;
2341 hw->bus.device = pci_dev->addr.devid;
2342 hw->bus.func = pci_dev->addr.function;
2343 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
2344 hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2345 adapter->eth_dev = eth_dev;
2346 adapter->stopped = 1;
2348 if (iavf_init_vf(eth_dev) != 0) {
2349 PMD_INIT_LOG(ERR, "Init vf failed");
2353 /* set default ptype table */
2354 adapter->ptype_tbl = iavf_get_default_ptype_table();
2357 eth_dev->data->mac_addrs = rte_zmalloc(
2358 "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
2359 if (!eth_dev->data->mac_addrs) {
2360 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
2361 " store MAC addresses",
2362 RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
2366 /* If the MAC address is not configured by host,
2367 * generate a random one.
2369 if (!rte_is_valid_assigned_ether_addr(
2370 (struct rte_ether_addr *)hw->mac.addr))
2371 rte_eth_random_addr(hw->mac.addr);
2372 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
2373 ð_dev->data->mac_addrs[0]);
2375 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2376 /* register callback func to eal lib */
2377 rte_intr_callback_register(&pci_dev->intr_handle,
2378 iavf_dev_interrupt_handler,
2381 /* enable uio intr after callback register */
2382 rte_intr_enable(&pci_dev->intr_handle);
2384 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2385 iavf_dev_alarm_handler, eth_dev);
2388 /* configure and enable device interrupt */
2389 iavf_enable_irq0(hw);
2391 ret = iavf_flow_init(adapter);
2393 PMD_INIT_LOG(ERR, "Failed to initialize flow");
2397 iavf_default_rss_disable(adapter);
2402 rte_free(eth_dev->data->mac_addrs);
2403 eth_dev->data->mac_addrs = NULL;
2406 iavf_uninit_vf(eth_dev);
2412 iavf_dev_close(struct rte_eth_dev *dev)
2414 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2415 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2416 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2417 struct iavf_adapter *adapter =
2418 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2419 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2422 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2425 ret = iavf_dev_stop(dev);
2427 iavf_flow_flush(dev, NULL);
2428 iavf_flow_uninit(adapter);
2431 * Disable promiscuous mode before resetting the VF.
2432 * This is a workaround when working with the kernel PF driver
2433 * and is not the normal flow.
2435 if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
2436 iavf_config_promisc(adapter, false, false);
2438 iavf_shutdown_adminq(hw);
2439 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2440 /* disable uio intr before callback unregister */
2441 rte_intr_disable(intr_handle);
2443 /* unregister callback func from eal lib */
2444 rte_intr_callback_unregister(intr_handle,
2445 iavf_dev_interrupt_handler, dev);
2447 rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
2449 iavf_disable_irq0(hw);
2451 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
2452 iavf_tm_conf_uninit(dev);
2454 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2456 rte_free(vf->rss_lut);
2460 rte_free(vf->rss_key);
2465 rte_free(vf->vf_res);
2469 rte_free(vf->aq_resp);
2473 * If the VF is reset via VFLR, the device will be knocked out of bus
2474 * master mode, and the driver will fail to recover from the reset. Fix
2475 * this by enabling bus mastering after every reset. In a non-VFLR case,
2476 * the bus master bit will not be disabled, and this call will have no
2479 if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
2480 vf->vf_reset = false;
2486 iavf_dev_uninit(struct rte_eth_dev *dev)
2488 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2491 iavf_dev_close(dev);
2497 * Reset the VF device only to re-initialize resources in the PMD layer
2500 iavf_dev_reset(struct rte_eth_dev *dev)
2504 ret = iavf_dev_uninit(dev);
2508 return iavf_dev_init(dev);
2512 iavf_dcf_cap_check_handler(__rte_unused const char *key,
2513 const char *value, __rte_unused void *opaque)
2515 if (strcmp(value, "dcf"))
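/* Decline to probe when the devargs carry cap=dcf so that a DCF
 * (device config function) capable driver can claim the device
 * instead, e.g. (illustrative PCI address): -a 18:01.0,cap=dcf
 */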
2522 iavf_dcf_cap_selected(struct rte_devargs *devargs)
2524 struct rte_kvargs *kvlist;
2525 const char *key = "cap";
2528 if (devargs == NULL)
2531 kvlist = rte_kvargs_parse(devargs->args, NULL);
2535 if (!rte_kvargs_count(kvlist, key))
2538 /* dcf capability selected when there's a key-value pair: cap=dcf */
2539 if (rte_kvargs_process(kvlist, key,
2540 iavf_dcf_cap_check_handler, NULL) < 0)
2546 rte_kvargs_free(kvlist);
2550 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2551 struct rte_pci_device *pci_dev)
2553 if (iavf_dcf_cap_selected(pci_dev->device.devargs))
2556 return rte_eth_dev_pci_generic_probe(pci_dev,
2557 sizeof(struct iavf_adapter), iavf_dev_init);
2560 static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
2562 return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
2565 /* Adaptive virtual function driver struct */
2566 static struct rte_pci_driver rte_iavf_pmd = {
2567 .id_table = pci_id_iavf_map,
2568 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2569 .probe = eth_iavf_pci_probe,
2570 .remove = eth_iavf_pci_remove,
2573 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
2574 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
2575 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
2576 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
2577 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
2578 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
2579 #ifdef RTE_ETHDEV_DEBUG_RX
2580 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
2582 #ifdef RTE_ETHDEV_DEBUG_TX
2583 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);