1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_spinlock.h>
18 #include <rte_rawdev.h>
19 #include <rte_rawdev_pmd.h>
20 #include <rte_bus_ifpga.h>
21 #include <ifpga_logs.h>
23 #include "ipn3ke_rawdev_api.h"
24 #include "ipn3ke_flow.h"
25 #include "ipn3ke_logs.h"
26 #include "ipn3ke_ethdev.h"
28 static int ipn3ke_rpst_scan_num;
29 static pthread_t ipn3ke_rpst_scan_thread;
31 /** Double linked list of representor port. */
32 TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst);
34 static struct ipn3ke_rpst_list ipn3ke_rpst_list =
35 TAILQ_HEAD_INITIALIZER(ipn3ke_rpst_list);
37 static rte_spinlock_t ipn3ke_link_notify_list_lk = RTE_SPINLOCK_INITIALIZER;
40 ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst);
43 ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
44 struct rte_eth_dev_info *dev_info)
46 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
47 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
49 dev_info->speed_capa =
50 (hw->retimer.mac_type ==
51 IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
53 ((hw->retimer.mac_type ==
54 IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
56 ETH_LINK_SPEED_AUTONEG);
58 dev_info->max_rx_queues = 1;
59 dev_info->max_tx_queues = 1;
60 dev_info->min_rx_bufsize = IPN3KE_AFU_BUF_SIZE_MIN;
61 dev_info->max_rx_pktlen = IPN3KE_AFU_FRAME_SIZE_MAX;
62 dev_info->max_mac_addrs = hw->port_num;
63 dev_info->max_vfs = 0;
64 dev_info->default_txconf = (struct rte_eth_txconf) {
67 dev_info->rx_queue_offload_capa = 0;
68 dev_info->rx_offload_capa =
69 DEV_RX_OFFLOAD_VLAN_STRIP |
70 DEV_RX_OFFLOAD_QINQ_STRIP |
71 DEV_RX_OFFLOAD_IPV4_CKSUM |
72 DEV_RX_OFFLOAD_UDP_CKSUM |
73 DEV_RX_OFFLOAD_TCP_CKSUM |
74 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
75 DEV_RX_OFFLOAD_VLAN_EXTEND |
76 DEV_RX_OFFLOAD_VLAN_FILTER |
77 DEV_RX_OFFLOAD_JUMBO_FRAME;
79 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
80 dev_info->tx_offload_capa =
81 DEV_TX_OFFLOAD_VLAN_INSERT |
82 DEV_TX_OFFLOAD_QINQ_INSERT |
83 DEV_TX_OFFLOAD_IPV4_CKSUM |
84 DEV_TX_OFFLOAD_UDP_CKSUM |
85 DEV_TX_OFFLOAD_TCP_CKSUM |
86 DEV_TX_OFFLOAD_SCTP_CKSUM |
87 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
88 DEV_TX_OFFLOAD_TCP_TSO |
89 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
90 DEV_TX_OFFLOAD_GRE_TNL_TSO |
91 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
92 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
93 DEV_TX_OFFLOAD_MULTI_SEGS |
94 dev_info->tx_queue_offload_capa;
97 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
98 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
100 dev_info->switch_info.name = ethdev->device->name;
101 dev_info->switch_info.domain_id = rpst->switch_domain_id;
102 dev_info->switch_info.port_id = rpst->port_id;
106 ipn3ke_rpst_dev_configure(__rte_unused struct rte_eth_dev *dev)
112 ipn3ke_rpst_dev_start(struct rte_eth_dev *dev)
114 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
115 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
116 struct rte_rawdev *rawdev;
119 char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX];
123 memset(attr_name, 0, sizeof(attr_name));
124 snprintf(attr_name, IPN3KE_RAWDEV_ATTR_LEN_MAX, "%s",
126 rawdev->dev_ops->attr_get(rawdev, attr_name, &base_mac);
127 ether_addr_copy((struct ether_addr *)&base_mac, &rpst->mac_addr);
129 ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]);
130 dev->data->mac_addrs->addr_bytes[ETHER_ADDR_LEN - 1] =
131 (uint8_t)rpst->port_id + 1;
133 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
134 /* Set mac address */
135 rte_memcpy(((char *)(&val)),
136 (char *)&dev->data->mac_addrs->addr_bytes[0],
138 (*hw->f_mac_write)(hw,
140 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
143 rte_memcpy(((char *)(&val)),
144 (char *)&dev->data->mac_addrs->addr_bytes[4],
146 (*hw->f_mac_write)(hw,
148 IPN3KE_MAC_PRIMARY_MAC_ADDR1,
152 /* Enable the TX path */
153 ipn3ke_xmac_tx_enable(hw, rpst->port_id, 0);
155 /* Disables source address override */
156 ipn3ke_xmac_smac_ovd_dis(hw, rpst->port_id, 0);
158 /* Enable the RX path */
159 ipn3ke_xmac_rx_enable(hw, rpst->port_id, 0);
161 /* Clear all TX statistics counters */
162 ipn3ke_xmac_tx_clr_stcs(hw, rpst->port_id, 0);
164 /* Clear all RX statistics counters */
165 ipn3ke_xmac_rx_clr_stcs(hw, rpst->port_id, 0);
168 ipn3ke_rpst_link_update(dev, 0);
174 ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev)
176 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
177 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
179 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
180 /* Disable the TX path */
181 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
183 /* Disable the RX path */
184 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
189 ipn3ke_rpst_dev_close(struct rte_eth_dev *dev)
191 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
192 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
194 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
195 /* Disable the TX path */
196 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
198 /* Disable the RX path */
199 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
204 * Reset PF device only to re-initialize resources in PMD layer
207 ipn3ke_rpst_dev_reset(struct rte_eth_dev *dev)
209 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
210 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
212 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
213 /* Disable the TX path */
214 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
216 /* Disable the RX path */
217 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
224 ipn3ke_rpst_rx_queue_start(__rte_unused struct rte_eth_dev *dev,
225 __rte_unused uint16_t rx_queue_id)
231 ipn3ke_rpst_rx_queue_stop(__rte_unused struct rte_eth_dev *dev,
232 __rte_unused uint16_t rx_queue_id)
238 ipn3ke_rpst_tx_queue_start(__rte_unused struct rte_eth_dev *dev,
239 __rte_unused uint16_t tx_queue_id)
245 ipn3ke_rpst_tx_queue_stop(__rte_unused struct rte_eth_dev *dev,
246 __rte_unused uint16_t tx_queue_id)
252 ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
253 __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
254 __rte_unused unsigned int socket_id,
255 __rte_unused const struct rte_eth_rxconf *rx_conf,
256 __rte_unused struct rte_mempool *mp)
262 ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
267 ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
268 __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
269 __rte_unused unsigned int socket_id,
270 __rte_unused const struct rte_eth_txconf *tx_conf)
276 ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
281 ipn3ke_rpst_stats_get(__rte_unused struct rte_eth_dev *ethdev,
282 __rte_unused struct rte_eth_stats *stats)
288 ipn3ke_rpst_xstats_get(__rte_unused struct rte_eth_dev *dev,
289 __rte_unused struct rte_eth_xstat *xstats, __rte_unused unsigned int n)
295 ipn3ke_rpst_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
296 __rte_unused struct rte_eth_xstat_name *xstats_names,
297 __rte_unused unsigned int limit)
303 ipn3ke_rpst_stats_reset(__rte_unused struct rte_eth_dev *ethdev)
308 ipn3ke_update_link(struct rte_rawdev *rawdev,
309 uint16_t port, struct rte_eth_link *link)
311 uint64_t line_link_bitmap = 0;
312 enum ifpga_rawdev_link_speed link_speed;
314 rawdev->dev_ops->attr_get(rawdev,
315 "LineSideLinkStatus",
316 (uint64_t *)&line_link_bitmap);
318 /* Parse the link status */
319 if ((1 << port) & line_link_bitmap)
320 link->link_status = 1;
322 link->link_status = 0;
324 IPN3KE_AFU_PMD_DEBUG("port is %d\n", port);
325 IPN3KE_AFU_PMD_DEBUG("link->link_status is %d\n", link->link_status);
327 rawdev->dev_ops->attr_get(rawdev,
329 (uint64_t *)&link_speed);
330 switch (link_speed) {
331 case IFPGA_RAWDEV_LINK_SPEED_10GB:
332 link->link_speed = ETH_SPEED_NUM_10G;
334 case IFPGA_RAWDEV_LINK_SPEED_25GB:
335 link->link_speed = ETH_SPEED_NUM_25G;
338 IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
344 * Set device link up.
347 ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev)
349 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
350 struct rte_eth_dev *pf;
353 if (rpst->i40e_pf_eth) {
354 ret = rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
355 pf = rpst->i40e_pf_eth;
356 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
363 * Set device link down.
366 ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev)
368 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
369 struct rte_eth_dev *pf;
372 if (rpst->i40e_pf_eth) {
373 ret = rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
374 pf = rpst->i40e_pf_eth;
375 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
382 ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
383 __rte_unused int wait_to_complete)
385 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
386 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
387 struct rte_rawdev *rawdev;
388 struct rte_eth_link link;
389 struct rte_eth_dev *pf;
391 memset(&link, 0, sizeof(link));
393 link.link_duplex = ETH_LINK_FULL_DUPLEX;
394 link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
395 ETH_LINK_SPEED_FIXED);
398 ipn3ke_update_link(rawdev, rpst->port_id, &link);
400 if (!rpst->ori_linfo.link_status &&
402 IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up\n", rpst->port_id);
403 rpst->ori_linfo.link_status = link.link_status;
404 rpst->ori_linfo.link_speed = link.link_speed;
406 rte_eth_linkstatus_set(ethdev, &link);
408 if (rpst->i40e_pf_eth) {
409 IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up\n",
410 rpst->i40e_pf_eth_port_id);
411 rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
412 pf = rpst->i40e_pf_eth;
413 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
415 } else if (rpst->ori_linfo.link_status &&
417 IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down\n",
419 rpst->ori_linfo.link_status = link.link_status;
420 rpst->ori_linfo.link_speed = link.link_speed;
422 rte_eth_linkstatus_set(ethdev, &link);
424 if (rpst->i40e_pf_eth) {
425 IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down\n",
426 rpst->i40e_pf_eth_port_id);
427 rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
428 pf = rpst->i40e_pf_eth;
429 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
437 ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
439 struct ipn3ke_hw *hw;
440 struct rte_rawdev *rawdev;
441 struct rte_eth_link link;
442 struct rte_eth_dev *pf;
449 memset(&link, 0, sizeof(link));
451 link.link_duplex = ETH_LINK_FULL_DUPLEX;
452 link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
453 ETH_LINK_SPEED_FIXED);
456 ipn3ke_update_link(rawdev, rpst->port_id, &link);
458 if (!rpst->ori_linfo.link_status &&
460 IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up\n", rpst->port_id);
461 rpst->ori_linfo.link_status = link.link_status;
462 rpst->ori_linfo.link_speed = link.link_speed;
464 rte_eth_linkstatus_set(rpst->ethdev, &link);
466 if (rpst->i40e_pf_eth) {
467 IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up\n",
468 rpst->i40e_pf_eth_port_id);
469 rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
470 pf = rpst->i40e_pf_eth;
471 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
473 } else if (rpst->ori_linfo.link_status &&
475 IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down\n", rpst->port_id);
476 rpst->ori_linfo.link_status = link.link_status;
477 rpst->ori_linfo.link_speed = link.link_speed;
479 rte_eth_linkstatus_set(rpst->ethdev, &link);
481 if (rpst->i40e_pf_eth) {
482 IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down\n",
483 rpst->i40e_pf_eth_port_id);
484 rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
485 pf = rpst->i40e_pf_eth;
486 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
494 ipn3ke_rpst_scan_handle_request(__rte_unused void *param)
496 struct ipn3ke_rpst *rpst;
503 TAILQ_FOREACH(rpst, &ipn3ke_rpst_list, next) {
504 if (rpst->i40e_pf_eth &&
505 rpst->ethdev->data->dev_started &&
506 rpst->i40e_pf_eth->data->dev_started)
507 ipn3ke_rpst_link_check(rpst);
509 if (++num > SCAN_NUM)
510 rte_delay_us(1 * MS);
512 rte_delay_us(50 * MS);
522 ipn3ke_rpst_scan_check(void)
526 if (ipn3ke_rpst_scan_num == 1) {
527 ret = pthread_create(&ipn3ke_rpst_scan_thread,
529 ipn3ke_rpst_scan_handle_request, NULL);
531 IPN3KE_AFU_PMD_ERR("Fail to create ipn3ke rpst scan thread");
534 } else if (ipn3ke_rpst_scan_num == 0) {
535 ret = pthread_cancel(ipn3ke_rpst_scan_thread);
537 IPN3KE_AFU_PMD_ERR("Can't cancel the thread");
539 ret = pthread_join(ipn3ke_rpst_scan_thread, NULL);
541 IPN3KE_AFU_PMD_ERR("Can't join the thread");
550 ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev)
552 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
553 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
554 uint32_t rddata, val;
556 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
557 /* Enable all unicast */
558 (*hw->f_mac_read)(hw,
560 IPN3KE_MAC_RX_FRAME_CONTROL,
564 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
566 (*hw->f_mac_write)(hw,
568 IPN3KE_MAC_RX_FRAME_CONTROL,
575 ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev)
577 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
578 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
579 uint32_t rddata, val;
581 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
582 /* Disable all unicast */
583 (*hw->f_mac_read)(hw,
585 IPN3KE_MAC_RX_FRAME_CONTROL,
589 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
591 (*hw->f_mac_write)(hw,
593 IPN3KE_MAC_RX_FRAME_CONTROL,
600 ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev)
602 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
603 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
604 uint32_t rddata, val;
606 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
607 /* Enable all unicast */
608 (*hw->f_mac_read)(hw,
610 IPN3KE_MAC_RX_FRAME_CONTROL,
614 val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
615 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
617 (*hw->f_mac_write)(hw,
619 IPN3KE_MAC_RX_FRAME_CONTROL,
626 ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev)
628 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
629 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
630 uint32_t rddata, val;
632 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
633 /* Disable all unicast */
634 (*hw->f_mac_read)(hw,
636 IPN3KE_MAC_RX_FRAME_CONTROL,
640 val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
641 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
643 (*hw->f_mac_write)(hw,
645 IPN3KE_MAC_RX_FRAME_CONTROL,
652 ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev,
653 struct ether_addr *mac_addr)
655 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
656 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
659 if (!is_valid_assigned_ether_addr(mac_addr)) {
660 IPN3KE_AFU_PMD_ERR("Tried to set invalid MAC address.");
664 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
665 ether_addr_copy(&mac_addr[0], &rpst->mac_addr);
667 /* Set mac address */
668 rte_memcpy(((char *)(&val)), &mac_addr[0], sizeof(uint32_t));
669 (*hw->f_mac_write)(hw,
671 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
674 rte_memcpy(((char *)(&val)), &mac_addr[4], sizeof(uint16_t));
675 (*hw->f_mac_write)(hw,
677 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
686 ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
689 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
690 struct rte_eth_dev_data *dev_data = ethdev->data;
691 uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD;
693 /* check if mtu is within the allowed range */
694 if (mtu < ETHER_MIN_MTU ||
695 frame_size > IPN3KE_MAC_FRAME_SIZE_MAX)
698 /* mtu setting is forbidden if port is start */
699 /* make sure NIC port is stopped */
700 if (rpst->i40e_pf_eth && rpst->i40e_pf_eth->data->dev_started) {
701 IPN3KE_AFU_PMD_ERR("NIC port %d must "
702 "be stopped before configuration",
703 rpst->i40e_pf_eth->data->port_id);
706 /* mtu setting is forbidden if port is start */
707 if (dev_data->dev_started) {
708 IPN3KE_AFU_PMD_ERR("FPGA port %d must "
709 "be stopped before configuration",
714 if (frame_size > ETHER_MAX_LEN)
715 dev_data->dev_conf.rxmode.offloads |=
716 (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
718 dev_data->dev_conf.rxmode.offloads &=
719 (uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
721 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
723 if (rpst->i40e_pf_eth) {
724 ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
727 rpst->i40e_pf_eth->data->mtu = mtu;
734 ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
735 enum rte_filter_type filter_type, enum rte_filter_op filter_op,
738 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
739 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
746 switch (filter_type) {
747 case RTE_ETH_FILTER_GENERIC:
748 if (filter_op != RTE_ETH_FILTER_GET)
750 *(const void **)arg = &ipn3ke_flow_ops;
753 IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
758 else if (rpst->i40e_pf_eth)
759 (*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev,
769 static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
770 .dev_infos_get = ipn3ke_rpst_dev_infos_get,
772 .dev_configure = ipn3ke_rpst_dev_configure,
773 .dev_start = ipn3ke_rpst_dev_start,
774 .dev_stop = ipn3ke_rpst_dev_stop,
775 .dev_close = ipn3ke_rpst_dev_close,
776 .dev_reset = ipn3ke_rpst_dev_reset,
778 .stats_get = ipn3ke_rpst_stats_get,
779 .xstats_get = ipn3ke_rpst_xstats_get,
780 .xstats_get_names = ipn3ke_rpst_xstats_get_names,
781 .stats_reset = ipn3ke_rpst_stats_reset,
782 .xstats_reset = ipn3ke_rpst_stats_reset,
784 .filter_ctrl = ipn3ke_afu_filter_ctrl,
786 .rx_queue_start = ipn3ke_rpst_rx_queue_start,
787 .rx_queue_stop = ipn3ke_rpst_rx_queue_stop,
788 .tx_queue_start = ipn3ke_rpst_tx_queue_start,
789 .tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
790 .rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
791 .rx_queue_release = ipn3ke_rpst_rx_queue_release,
792 .tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
793 .tx_queue_release = ipn3ke_rpst_tx_queue_release,
795 .dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
796 .dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
797 .link_update = ipn3ke_rpst_link_update,
799 .promiscuous_enable = ipn3ke_rpst_promiscuous_enable,
800 .promiscuous_disable = ipn3ke_rpst_promiscuous_disable,
801 .allmulticast_enable = ipn3ke_rpst_allmulticast_enable,
802 .allmulticast_disable = ipn3ke_rpst_allmulticast_disable,
803 .mac_addr_set = ipn3ke_rpst_mac_addr_set,
804 .mtu_set = ipn3ke_rpst_mtu_set,
806 .tm_ops_get = ipn3ke_tm_ops_get,
809 static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q,
810 __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
816 ipn3ke_rpst_xmit_pkts(__rte_unused void *tx_queue,
817 __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
823 ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
825 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
826 struct ipn3ke_rpst *representor_param =
827 (struct ipn3ke_rpst *)init_params;
829 if (representor_param->port_id >= representor_param->hw->port_num)
832 rpst->ethdev = ethdev;
833 rpst->switch_domain_id = representor_param->switch_domain_id;
834 rpst->port_id = representor_param->port_id;
835 rpst->hw = representor_param->hw;
836 rpst->i40e_pf_eth = NULL;
837 rpst->i40e_pf_eth_port_id = 0xFFFF;
839 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", ETHER_ADDR_LEN, 0);
840 if (!ethdev->data->mac_addrs) {
841 IPN3KE_AFU_PMD_ERR("Failed to "
842 "allocated memory for storing mac address");
846 if (rpst->hw->tm_hw_enable)
847 ipn3ke_tm_init(rpst);
849 /* Set representor device ops */
850 ethdev->dev_ops = &ipn3ke_rpst_dev_ops;
852 /* No data-path, but need stub Rx/Tx functions to avoid crash
853 * when testing with the likes of testpmd.
855 ethdev->rx_pkt_burst = ipn3ke_rpst_recv_pkts;
856 ethdev->tx_pkt_burst = ipn3ke_rpst_xmit_pkts;
858 ethdev->data->nb_rx_queues = 1;
859 ethdev->data->nb_tx_queues = 1;
861 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke_afu_representor",
864 if (!ethdev->data->mac_addrs) {
865 IPN3KE_AFU_PMD_ERR("Failed to "
866 "allocated memory for storing mac address");
870 ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
872 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
873 TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
874 ipn3ke_rpst_scan_num++;
875 ipn3ke_rpst_scan_check();
876 rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);
/* Per-representor ethdev uninit callback: remove the port from the scan
 * list under the notify lock and rebalance the link-poll thread (the
 * thread is torn down when the last port unregisters).
 * NOTE(review): the view is truncated here — the function's tail lies
 * beyond this chunk.
 */
882 ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev)
884 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
886 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
887 TAILQ_REMOVE(&ipn3ke_rpst_list, rpst, next);
888 ipn3ke_rpst_scan_num--;
889 ipn3ke_rpst_scan_check();
890 rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);