1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_spinlock.h>
18 #include <rte_rawdev.h>
19 #include <rte_rawdev_pmd.h>
20 #include <rte_bus_ifpga.h>
21 #include <ifpga_logs.h>
23 #include "ipn3ke_rawdev_api.h"
24 #include "ipn3ke_logs.h"
25 #include "ipn3ke_ethdev.h"
/* Number of live representor ports; guarded by ipn3ke_link_notify_list_lk. */
27 static int ipn3ke_rpst_scan_num;
/* Background thread that periodically polls link state for all representors. */
28 static pthread_t ipn3ke_rpst_scan_thread;
30 /** Double linked list of representor port. */
31 TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst);
/* Global list of all registered representor ports, walked by the scan thread. */
33 static struct ipn3ke_rpst_list ipn3ke_rpst_list =
34 TAILQ_HEAD_INITIALIZER(ipn3ke_rpst_list);
/* Protects ipn3ke_rpst_list, ipn3ke_rpst_scan_num and thread start/stop. */
36 static rte_spinlock_t ipn3ke_link_notify_list_lk = RTE_SPINLOCK_INITIALIZER;
/* Forward declaration: periodic link-state poll for one representor. */
39 ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst);
/*
 * .dev_infos_get callback: report static capabilities of a representor port.
 * Speed capability is derived from the retimer MAC type (10G XFI vs 25G);
 * queue counts are fixed at 1 since the representor has no real datapath.
 */
42 ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
43 struct rte_eth_dev_info *dev_info)
45 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
46 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
/* Nested ternary: 10G XFI -> 10G capa, 25G 25GAUI -> 25G capa,
 * otherwise autonegotiation only. */
48 dev_info->speed_capa =
49 (hw->retimer.mac_type ==
50 IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
52 ((hw->retimer.mac_type ==
53 IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
55 ETH_LINK_SPEED_AUTONEG);
/* Single rx/tx queue pair: this port is a control-plane representor. */
57 dev_info->max_rx_queues = 1;
58 dev_info->max_tx_queues = 1;
59 dev_info->min_rx_bufsize = IPN3KE_AFU_BUF_SIZE_MIN;
60 dev_info->max_rx_pktlen = IPN3KE_AFU_FRAME_SIZE_MAX;
61 dev_info->max_mac_addrs = hw->port_num;
62 dev_info->max_vfs = 0;
63 dev_info->default_txconf = (struct rte_eth_txconf) {
/* Advertised Rx offloads (per-port only, no per-queue offloads). */
66 dev_info->rx_queue_offload_capa = 0;
67 dev_info->rx_offload_capa =
68 DEV_RX_OFFLOAD_VLAN_STRIP |
69 DEV_RX_OFFLOAD_QINQ_STRIP |
70 DEV_RX_OFFLOAD_IPV4_CKSUM |
71 DEV_RX_OFFLOAD_UDP_CKSUM |
72 DEV_RX_OFFLOAD_TCP_CKSUM |
73 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
74 DEV_RX_OFFLOAD_VLAN_EXTEND |
75 DEV_RX_OFFLOAD_VLAN_FILTER |
76 DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Advertised Tx offloads; the per-queue capa is folded into the port capa. */
78 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
79 dev_info->tx_offload_capa =
80 DEV_TX_OFFLOAD_VLAN_INSERT |
81 DEV_TX_OFFLOAD_QINQ_INSERT |
82 DEV_TX_OFFLOAD_IPV4_CKSUM |
83 DEV_TX_OFFLOAD_UDP_CKSUM |
84 DEV_TX_OFFLOAD_TCP_CKSUM |
85 DEV_TX_OFFLOAD_SCTP_CKSUM |
86 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
87 DEV_TX_OFFLOAD_TCP_TSO |
88 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
89 DEV_TX_OFFLOAD_GRE_TNL_TSO |
90 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
91 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
92 DEV_TX_OFFLOAD_MULTI_SEGS |
93 dev_info->tx_queue_offload_capa;
/* Queues may be set up while the device is running. */
96 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
97 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
/* Switch info lets applications map this representor back to its PF. */
99 dev_info->switch_info.name = ethdev->device->name;
100 dev_info->switch_info.domain_id = rpst->switch_domain_id;
101 dev_info->switch_info.port_id = rpst->port_id;
/* .dev_configure callback: nothing to configure for a representor port. */
105 ipn3ke_rpst_dev_configure(__rte_unused struct rte_eth_dev *dev)
/*
 * .dev_start callback: derive this port's MAC from the FPGA base MAC,
 * program it into the line-side MAC (10G XFI only), enable Tx/Rx paths,
 * clear statistics and refresh link status.
 */
111 ipn3ke_rpst_dev_start(struct rte_eth_dev *dev)
113 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
114 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
115 struct rte_rawdev *rawdev;
118 char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX];
/* Fetch the board base MAC address via the rawdev attribute interface. */
122 memset(attr_name, 0, sizeof(attr_name));
123 snprintf(attr_name, IPN3KE_RAWDEV_ATTR_LEN_MAX, "%s",
125 rawdev->dev_ops->attr_get(rawdev, attr_name, &base_mac);
126 ether_addr_copy((struct ether_addr *)&base_mac, &rpst->mac_addr);
/* Per-port MAC = base MAC with last byte replaced by (port_id + 1). */
128 ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]);
129 dev->data->mac_addrs->addr_bytes[ETHER_ADDR_LEN - 1] =
130 (uint8_t)rpst->port_id + 1;
132 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
133 /* Set mac address: low 4 bytes into ADDR0 register ... */
134 rte_memcpy(((char *)(&val)),
135 (char *)&dev->data->mac_addrs->addr_bytes[0],
137 (*hw->f_mac_write)(hw,
139 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
/* ... high 2 bytes into ADDR1 register. */
142 rte_memcpy(((char *)(&val)),
143 (char *)&dev->data->mac_addrs->addr_bytes[4],
145 (*hw->f_mac_write)(hw,
147 IPN3KE_MAC_PRIMARY_MAC_ADDR1,
151 /* Enable the TX path */
152 ipn3ke_xmac_tx_enable(hw, rpst->port_id, 0);
154 /* Disables source address override */
155 ipn3ke_xmac_smac_ovd_dis(hw, rpst->port_id, 0);
157 /* Enable the RX path */
158 ipn3ke_xmac_rx_enable(hw, rpst->port_id, 0);
160 /* Clear all TX statistics counters */
161 ipn3ke_xmac_tx_clr_stcs(hw, rpst->port_id, 0);
163 /* Clear all RX statistics counters */
164 ipn3ke_xmac_rx_clr_stcs(hw, rpst->port_id, 0);
/* Refresh link status now that the MAC paths are enabled. */
167 ipn3ke_rpst_link_update(dev, 0);
/* .dev_stop callback: quiesce the line-side MAC Tx/Rx paths (10G XFI only). */
173 ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev)
175 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
176 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
178 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
179 /* Disable the TX path */
180 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
182 /* Disable the RX path */
183 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
/* .dev_close callback: same MAC shutdown as stop; no other resources here. */
188 ipn3ke_rpst_dev_close(struct rte_eth_dev *dev)
190 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
191 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
193 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
194 /* Disable the TX path */
195 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
197 /* Disable the RX path */
198 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
203 * Reset PF device only to re-initialize resources in PMD layer
/* .dev_reset callback: disable MAC paths so the port can be re-initialized. */
206 ipn3ke_rpst_dev_reset(struct rte_eth_dev *dev)
208 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
209 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
211 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
212 /* Disable the TX path */
213 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
215 /* Disable the RX path */
216 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
/* .rx_queue_start stub: representor has no datapath queues. */
223 ipn3ke_rpst_rx_queue_start(__rte_unused struct rte_eth_dev *dev,
224 __rte_unused uint16_t rx_queue_id)
/* .rx_queue_stop stub: nothing to stop. */
230 ipn3ke_rpst_rx_queue_stop(__rte_unused struct rte_eth_dev *dev,
231 __rte_unused uint16_t rx_queue_id)
/* .tx_queue_start stub: representor has no datapath queues. */
237 ipn3ke_rpst_tx_queue_start(__rte_unused struct rte_eth_dev *dev,
238 __rte_unused uint16_t tx_queue_id)
/* .tx_queue_stop stub: nothing to stop. */
244 ipn3ke_rpst_tx_queue_stop(__rte_unused struct rte_eth_dev *dev,
245 __rte_unused uint16_t tx_queue_id)
/* .rx_queue_setup stub: accepted but ignored (no real queues). */
251 ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
252 __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
253 __rte_unused unsigned int socket_id,
254 __rte_unused const struct rte_eth_rxconf *rx_conf,
255 __rte_unused struct rte_mempool *mp)
/* .rx_queue_release stub: nothing was allocated. */
261 ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
/* .tx_queue_setup stub: accepted but ignored (no real queues). */
266 ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
267 __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
268 __rte_unused unsigned int socket_id,
269 __rte_unused const struct rte_eth_txconf *tx_conf)
/* .tx_queue_release stub: nothing was allocated. */
275 ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
/* .stats_get stub: statistics not implemented for representor ports. */
280 ipn3ke_rpst_stats_get(__rte_unused struct rte_eth_dev *ethdev,
281 __rte_unused struct rte_eth_stats *stats)
/* .xstats_get stub: extended statistics not implemented. */
287 ipn3ke_rpst_xstats_get(__rte_unused struct rte_eth_dev *dev,
288 __rte_unused struct rte_eth_xstat *xstats, __rte_unused unsigned int n)
/* .xstats_get_names stub: no extended statistic names exposed. */
294 ipn3ke_rpst_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
295 __rte_unused struct rte_eth_xstat_name *xstats_names,
296 __rte_unused unsigned int limit)
/* .stats_reset / .xstats_reset stub: nothing to reset. */
302 ipn3ke_rpst_stats_reset(__rte_unused struct rte_eth_dev *ethdev)
/*
 * Query line-side link status and speed for one port via rawdev attributes
 * and fill in @link. Bit @port of "LineSideLinkStatus" gives up/down;
 * the link-speed attribute is mapped to an ETH_SPEED_NUM_* value.
 */
307 ipn3ke_update_link(struct rte_rawdev *rawdev,
308 uint16_t port, struct rte_eth_link *link)
310 uint64_t line_link_bitmap = 0;
311 enum ifpga_rawdev_link_speed link_speed;
313 rawdev->dev_ops->attr_get(rawdev,
314 "LineSideLinkStatus",
315 (uint64_t *)&line_link_bitmap);
317 /* Parse the link status */
318 if ((1 << port) & line_link_bitmap)
319 link->link_status = 1;
321 link->link_status = 0;
323 IPN3KE_AFU_PMD_DEBUG("port is %d\n", port);
324 IPN3KE_AFU_PMD_DEBUG("link->link_status is %d\n", link->link_status);
/* NOTE(review): casting &link_speed (enum) to uint64_t* assumes the enum
 * occupies 8 bytes here — verify against the attr_get contract. */
326 rawdev->dev_ops->attr_get(rawdev,
328 (uint64_t *)&link_speed);
329 switch (link_speed) {
330 case IFPGA_RAWDEV_LINK_SPEED_10GB:
331 link->link_speed = ETH_SPEED_NUM_10G;
333 case IFPGA_RAWDEV_LINK_SPEED_25GB:
334 link->link_speed = ETH_SPEED_NUM_25G;
/* Unknown speeds are logged; link_speed field left as previously set. */
337 IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
343 * Set device link up.
/* Forwards the request to the bound i40e PF port (if any) and refreshes
 * the PF's link state via its own link_update op. */
346 ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev)
348 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
349 struct rte_eth_dev *pf;
352 if (rpst->i40e_pf_eth) {
353 ret = rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
354 pf = rpst->i40e_pf_eth;
355 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
362 * Set device link down.
/* Mirror of set_link_up: forwards link-down to the bound i40e PF port. */
365 ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev)
367 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
368 struct rte_eth_dev *pf;
371 if (rpst->i40e_pf_eth) {
372 ret = rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
373 pf = rpst->i40e_pf_eth;
374 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/*
 * .link_update callback: poll the FPGA line-side link, and on an up/down
 * transition record the new state, publish it via rte_eth_linkstatus_set
 * and propagate the change to the paired i40e PF port.
 */
381 ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
382 __rte_unused int wait_to_complete)
384 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
385 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
386 struct rte_rawdev *rawdev;
387 struct rte_eth_link link;
388 struct rte_eth_dev *pf;
390 memset(&link, 0, sizeof(link));
392 link.link_duplex = ETH_LINK_FULL_DUPLEX;
/* Autoneg unless the application pinned a fixed speed. */
393 link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
394 ETH_LINK_SPEED_FIXED);
397 ipn3ke_update_link(rawdev, rpst->port_id, &link);
/* Down -> up transition. */
399 if (!rpst->ori_linfo.link_status &&
401 IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up\n", rpst->port_id);
402 rpst->ori_linfo.link_status = link.link_status;
403 rpst->ori_linfo.link_speed = link.link_speed;
405 rte_eth_linkstatus_set(ethdev, &link);
407 if (rpst->i40e_pf_eth) {
408 IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up\n",
409 rpst->i40e_pf_eth_port_id);
410 rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
411 pf = rpst->i40e_pf_eth;
412 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/* Up -> down transition. */
414 } else if (rpst->ori_linfo.link_status &&
416 IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down\n",
418 rpst->ori_linfo.link_status = link.link_status;
419 rpst->ori_linfo.link_speed = link.link_speed;
421 rte_eth_linkstatus_set(ethdev, &link);
423 if (rpst->i40e_pf_eth) {
424 IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down\n",
425 rpst->i40e_pf_eth_port_id);
426 rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
427 pf = rpst->i40e_pf_eth;
428 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/*
 * Scan-thread variant of link_update: identical edge-detection logic,
 * but driven from the background thread using rpst->ethdev instead of
 * an ethdev argument. Propagates transitions to the paired i40e PF.
 */
436 ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
438 struct ipn3ke_hw *hw;
439 struct rte_rawdev *rawdev;
440 struct rte_eth_link link;
441 struct rte_eth_dev *pf;
448 memset(&link, 0, sizeof(link));
450 link.link_duplex = ETH_LINK_FULL_DUPLEX;
451 link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
452 ETH_LINK_SPEED_FIXED);
455 ipn3ke_update_link(rawdev, rpst->port_id, &link);
/* Down -> up transition. */
457 if (!rpst->ori_linfo.link_status &&
459 IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up\n", rpst->port_id);
460 rpst->ori_linfo.link_status = link.link_status;
461 rpst->ori_linfo.link_speed = link.link_speed;
463 rte_eth_linkstatus_set(rpst->ethdev, &link);
465 if (rpst->i40e_pf_eth) {
466 IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up\n",
467 rpst->i40e_pf_eth_port_id);
468 rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
469 pf = rpst->i40e_pf_eth;
470 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/* Up -> down transition. */
472 } else if (rpst->ori_linfo.link_status &&
474 IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down\n", rpst->port_id);
475 rpst->ori_linfo.link_status = link.link_status;
476 rpst->ori_linfo.link_speed = link.link_speed;
478 rte_eth_linkstatus_set(rpst->ethdev, &link);
480 if (rpst->i40e_pf_eth) {
481 IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down\n",
482 rpst->i40e_pf_eth_port_id);
483 rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
484 pf = rpst->i40e_pf_eth;
485 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/*
 * Body of the link-scan thread: loop over every registered representor
 * whose own port and paired PF port are both started, poll its link,
 * and sleep between sweeps (short delay while warming up, then 50 ms).
 * NOTE(review): iterates ipn3ke_rpst_list without taking
 * ipn3ke_link_notify_list_lk — confirm the thread cannot race with
 * init/uninit list mutation.
 */
493 ipn3ke_rpst_scan_handle_request(__rte_unused void *param)
495 struct ipn3ke_rpst *rpst;
502 TAILQ_FOREACH(rpst, &ipn3ke_rpst_list, next) {
503 if (rpst->i40e_pf_eth &&
504 rpst->ethdev->data->dev_started &&
505 rpst->i40e_pf_eth->data->dev_started)
506 ipn3ke_rpst_link_check(rpst);
508 if (++num > SCAN_NUM)
509 rte_delay_us(1 * MS);
511 rte_delay_us(50 * MS);
/*
 * Start the scan thread when the first representor registers
 * (scan_num == 1) and cancel/join it when the last one unregisters
 * (scan_num == 0). Called with ipn3ke_link_notify_list_lk held.
 */
521 ipn3ke_rpst_scan_check(void)
525 if (ipn3ke_rpst_scan_num == 1) {
526 ret = pthread_create(&ipn3ke_rpst_scan_thread,
528 ipn3ke_rpst_scan_handle_request, NULL);
530 IPN3KE_AFU_PMD_ERR("Fail to create ipn3ke rpst scan thread");
533 } else if (ipn3ke_rpst_scan_num == 0) {
534 ret = pthread_cancel(ipn3ke_rpst_scan_thread);
536 IPN3KE_AFU_PMD_ERR("Can't cancel the thread");
538 ret = pthread_join(ipn3ke_rpst_scan_thread, NULL);
540 IPN3KE_AFU_PMD_ERR("Can't join the thread");
/*
 * .promiscuous_enable callback: read-modify-write the MAC Rx frame
 * control register to accept all unicast frames (10G XFI MAC only).
 * NOTE(review): the visible masking (val &= ..._EN_ALLUCAST_MASK) is
 * identical to the disable path below — verify against the register
 * spec that the set/clear distinction lives in the lines not shown here.
 */
549 ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev)
551 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
552 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
553 uint32_t rddata, val;
555 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
556 /* Enable all unicast */
557 (*hw->f_mac_read)(hw,
559 IPN3KE_MAC_RX_FRAME_CONTROL,
563 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
565 (*hw->f_mac_write)(hw,
567 IPN3KE_MAC_RX_FRAME_CONTROL,
/*
 * .promiscuous_disable callback: read-modify-write the MAC Rx frame
 * control register to stop accepting all unicast frames (10G XFI only).
 */
574 ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev)
576 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
577 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
578 uint32_t rddata, val;
580 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
581 /* Disable all unicast */
582 (*hw->f_mac_read)(hw,
584 IPN3KE_MAC_RX_FRAME_CONTROL,
588 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
590 (*hw->f_mac_write)(hw,
592 IPN3KE_MAC_RX_FRAME_CONTROL,
/*
 * .allmulticast_enable callback: shift-and-mask the all-multicast enable
 * bit into the MAC Rx frame control register (10G XFI MAC only).
 * NOTE(review): visible shift/mask lines match the disable path below —
 * the enable/disable difference must come from lines not shown; confirm.
 */
599 ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev)
601 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
602 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
603 uint32_t rddata, val;
605 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
606 /* Enable all unicast */
607 (*hw->f_mac_read)(hw,
609 IPN3KE_MAC_RX_FRAME_CONTROL,
613 val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
614 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
616 (*hw->f_mac_write)(hw,
618 IPN3KE_MAC_RX_FRAME_CONTROL,
/*
 * .allmulticast_disable callback: clear the all-multicast enable bit in
 * the MAC Rx frame control register (10G XFI MAC only).
 */
625 ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev)
627 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
628 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
629 uint32_t rddata, val;
631 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
632 /* Disable all unicast */
633 (*hw->f_mac_read)(hw,
635 IPN3KE_MAC_RX_FRAME_CONTROL,
639 val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
640 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
642 (*hw->f_mac_write)(hw,
644 IPN3KE_MAC_RX_FRAME_CONTROL,
/*
 * .mac_addr_set callback: validate and store the new MAC, then program
 * it into the line-side MAC registers (10G XFI only), mirroring the
 * split used in dev_start: bytes 0-3 -> ADDR0, bytes 4-5 -> ADDR1.
 *
 * Fixes vs. the previous revision:
 *  - &mac_addr[0] / &mac_addr[4] indexed whole struct ether_addr
 *    elements (24 bytes past the argument for [4], an out-of-bounds
 *    read); byte addressing must go through addr_bytes[].
 *  - the second register write targeted IPN3KE_MAC_PRIMARY_MAC_ADDR0
 *    again, clobbering the low half and never writing the high half;
 *    it must target IPN3KE_MAC_PRIMARY_MAC_ADDR1 (as dev_start does).
 */
651 ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev,
652 struct ether_addr *mac_addr)
654 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
655 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
658 if (!is_valid_assigned_ether_addr(mac_addr)) {
659 IPN3KE_AFU_PMD_ERR("Tried to set invalid MAC address.");
663 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
664 ether_addr_copy(&mac_addr[0], &rpst->mac_addr);
666 /* Set mac address: low 4 bytes into the ADDR0 register. */
667 rte_memcpy(((char *)(&val)), &mac_addr->addr_bytes[0], sizeof(uint32_t));
668 (*hw->f_mac_write)(hw,
670 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
/* High 2 bytes into the ADDR1 register. */
673 rte_memcpy(((char *)(&val)), &mac_addr->addr_bytes[4], sizeof(uint16_t));
674 (*hw->f_mac_write)(hw,
676 IPN3KE_MAC_PRIMARY_MAC_ADDR1,
/*
 * .mtu_set callback: validate the requested MTU, require both the FPGA
 * port and the paired NIC port to be stopped, toggle the jumbo-frame
 * offload according to the resulting frame size, record the new maximum
 * Rx packet length, and forward the MTU change to the i40e PF port.
 */
685 ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
688 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
689 struct rte_eth_dev_data *dev_data = ethdev->data;
690 uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD;
692 /* check if mtu is within the allowed range */
693 if (mtu < ETHER_MIN_MTU ||
694 frame_size > IPN3KE_MAC_FRAME_SIZE_MAX)
697 /* mtu setting is forbidden if port is start */
698 /* make sure NIC port is stopped */
699 if (rpst->i40e_pf_eth && rpst->i40e_pf_eth->data->dev_started) {
700 IPN3KE_AFU_PMD_ERR("NIC port %d must "
701 "be stopped before configuration",
702 rpst->i40e_pf_eth->data->port_id);
705 /* mtu setting is forbidden if port is start */
706 if (dev_data->dev_started) {
707 IPN3KE_AFU_PMD_ERR("FPGA port %d must "
708 "be stopped before configuration",
/* Frames larger than a standard Ethernet frame require the jumbo offload. */
713 if (frame_size > ETHER_MAX_LEN)
714 dev_data->dev_conf.rxmode.offloads |=
715 (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
717 dev_data->dev_conf.rxmode.offloads &=
718 (uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
720 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/* Propagate the MTU to the paired i40e PF and mirror it in its data. */
722 if (rpst->i40e_pf_eth) {
723 ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
726 rpst->i40e_pf_eth->data->mtu = mtu;
/*
 * .filter_ctrl callback: for RTE_ETH_FILTER_GENERIC/GET return the flow
 * ops pointer (NULL here, set elsewhere); other filter types are either
 * warned about or delegated to the paired i40e PF's filter_ctrl op.
 */
733 ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
734 enum rte_filter_type filter_type, enum rte_filter_op filter_op,
737 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
738 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
745 switch (filter_type) {
746 case RTE_ETH_FILTER_GENERIC:
/* Only the GET operation is meaningful for the generic filter type. */
747 if (filter_op != RTE_ETH_FILTER_GET)
749 *(const void **)arg = NULL;
752 IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
/* Fall back to the PF's filter handling when one is bound. */
757 else if (rpst->i40e_pf_eth)
758 (*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev,
/* ethdev operations table for ipn3ke representor ports. */
768 static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
769 .dev_infos_get = ipn3ke_rpst_dev_infos_get,
771 .dev_configure = ipn3ke_rpst_dev_configure,
772 .dev_start = ipn3ke_rpst_dev_start,
773 .dev_stop = ipn3ke_rpst_dev_stop,
774 .dev_close = ipn3ke_rpst_dev_close,
775 .dev_reset = ipn3ke_rpst_dev_reset,
/* Stats callbacks are stubs; xstats_reset reuses the stats_reset stub. */
777 .stats_get = ipn3ke_rpst_stats_get,
778 .xstats_get = ipn3ke_rpst_xstats_get,
779 .xstats_get_names = ipn3ke_rpst_xstats_get_names,
780 .stats_reset = ipn3ke_rpst_stats_reset,
781 .xstats_reset = ipn3ke_rpst_stats_reset,
783 .filter_ctrl = ipn3ke_afu_filter_ctrl,
/* Queue callbacks are no-ops: the representor owns no datapath queues. */
785 .rx_queue_start = ipn3ke_rpst_rx_queue_start,
786 .rx_queue_stop = ipn3ke_rpst_rx_queue_stop,
787 .tx_queue_start = ipn3ke_rpst_tx_queue_start,
788 .tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
789 .rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
790 .rx_queue_release = ipn3ke_rpst_rx_queue_release,
791 .tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
792 .tx_queue_release = ipn3ke_rpst_tx_queue_release,
794 .dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
795 .dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
796 .link_update = ipn3ke_rpst_link_update,
798 .promiscuous_enable = ipn3ke_rpst_promiscuous_enable,
799 .promiscuous_disable = ipn3ke_rpst_promiscuous_disable,
800 .allmulticast_enable = ipn3ke_rpst_allmulticast_enable,
801 .allmulticast_disable = ipn3ke_rpst_allmulticast_disable,
802 .mac_addr_set = ipn3ke_rpst_mac_addr_set,
803 .mtu_set = ipn3ke_rpst_mtu_set,
/* Stub Rx burst: representor receives nothing; present so apps can poll. */
806 static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q,
807 __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
/* Stub Tx burst: representor transmits nothing; present so apps can call it. */
813 ipn3ke_rpst_xmit_pkts(__rte_unused void *tx_queue,
814 __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
/*
 * Representor port init callback (passed to the ethdev representor
 * creation helper): copy identity from init_params, install ops and
 * stub burst functions, allocate MAC storage, mark the port as a
 * representor, and register it with the global scan list/thread.
 */
820 ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
822 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
823 struct ipn3ke_rpst *representor_param =
824 (struct ipn3ke_rpst *)init_params;
/* Reject port ids beyond what the FPGA exposes. */
826 if (representor_param->port_id >= representor_param->hw->port_num)
829 rpst->ethdev = ethdev;
830 rpst->switch_domain_id = representor_param->switch_domain_id;
831 rpst->port_id = representor_param->port_id;
832 rpst->hw = representor_param->hw;
/* PF binding is established later; 0xFFFF marks "no PF bound yet". */
833 rpst->i40e_pf_eth = NULL;
834 rpst->i40e_pf_eth_port_id = 0xFFFF;
/* NOTE(review): mac_addrs is allocated here AND again below ("ipn3ke_afu_
 * representor"); the second assignment leaks this first buffer. One of
 * the two allocations should be removed. */
836 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", ETHER_ADDR_LEN, 0);
837 if (!ethdev->data->mac_addrs) {
838 IPN3KE_AFU_PMD_ERR("Failed to "
839 "allocated memory for storing mac address");
843 /* Set representor device ops */
844 ethdev->dev_ops = &ipn3ke_rpst_dev_ops;
846 /* No data-path, but need stub Rx/Tx functions to avoid crash
847 * when testing with the likes of testpmd.
849 ethdev->rx_pkt_burst = ipn3ke_rpst_recv_pkts;
850 ethdev->tx_pkt_burst = ipn3ke_rpst_xmit_pkts;
852 ethdev->data->nb_rx_queues = 1;
853 ethdev->data->nb_tx_queues = 1;
/* Second MAC-storage allocation (see leak note above). */
855 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke_afu_representor",
858 if (!ethdev->data->mac_addrs) {
859 IPN3KE_AFU_PMD_ERR("Failed to "
860 "allocated memory for storing mac address");
864 ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
/* Register with the scan list; scan_check starts the thread on first port. */
866 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
867 TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
868 ipn3ke_rpst_scan_num++;
869 ipn3ke_rpst_scan_check();
870 rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);
/*
 * Representor uninit: unregister from the scan list under the lock;
 * scan_check stops the thread when the last representor is removed.
 */
876 ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev)
878 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
880 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
881 TAILQ_REMOVE(&ipn3ke_rpst_list, rpst, next);
882 ipn3ke_rpst_scan_num--;
883 ipn3ke_rpst_scan_check();
884 rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);