1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_spinlock.h>
18 #include <rte_rawdev.h>
19 #include <rte_rawdev_pmd.h>
20 #include <rte_bus_ifpga.h>
21 #include <ifpga_logs.h>
23 #include "ipn3ke_rawdev_api.h"
24 #include "ipn3ke_flow.h"
25 #include "ipn3ke_logs.h"
26 #include "ipn3ke_ethdev.h"
/* Number of live representor ports; drives creation/teardown of the
 * background link-scan thread (see ipn3ke_rpst_scan_check()).
 */
28 static int ipn3ke_rpst_scan_num;
29 static pthread_t ipn3ke_rpst_scan_thread;
31 /** Double linked list of representor port. */
32 TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst);
34 static struct ipn3ke_rpst_list ipn3ke_rpst_list =
35 TAILQ_HEAD_INITIALIZER(ipn3ke_rpst_list);
/* Protects ipn3ke_rpst_list and ipn3ke_rpst_scan_num. */
37 static rte_spinlock_t ipn3ke_link_notify_list_lk = RTE_SPINLOCK_INITIALIZER;
/* Forward declaration; called from the scan thread loop. */
40 ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst);
/* .dev_infos_get handler: report the representor's capabilities.
 * Speed capability is selected from the retimer MAC type (10G XFI,
 * else 25G 25GAUI, else autoneg). The representor exposes exactly one
 * Rx and one Tx queue and no VFs.
 */
43 ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
44 struct rte_eth_dev_info *dev_info)
46 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
47 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
/* Nested ternary on retimer.mac_type selects the advertised speed. */
49 dev_info->speed_capa =
50 (hw->retimer.mac_type ==
51 IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
53 ((hw->retimer.mac_type ==
54 IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
56 ETH_LINK_SPEED_AUTONEG);
58 dev_info->max_rx_queues = 1;
59 dev_info->max_tx_queues = 1;
60 dev_info->min_rx_bufsize = IPN3KE_AFU_BUF_SIZE_MIN;
61 dev_info->max_rx_pktlen = IPN3KE_AFU_FRAME_SIZE_MAX;
62 dev_info->max_mac_addrs = hw->port_num;
63 dev_info->max_vfs = 0;
64 dev_info->default_txconf = (struct rte_eth_txconf) {
67 dev_info->rx_queue_offload_capa = 0;
68 dev_info->rx_offload_capa =
69 DEV_RX_OFFLOAD_VLAN_STRIP |
70 DEV_RX_OFFLOAD_QINQ_STRIP |
71 DEV_RX_OFFLOAD_IPV4_CKSUM |
72 DEV_RX_OFFLOAD_UDP_CKSUM |
73 DEV_RX_OFFLOAD_TCP_CKSUM |
74 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
75 DEV_RX_OFFLOAD_VLAN_EXTEND |
76 DEV_RX_OFFLOAD_VLAN_FILTER |
77 DEV_RX_OFFLOAD_JUMBO_FRAME;
79 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
80 dev_info->tx_offload_capa =
81 DEV_TX_OFFLOAD_VLAN_INSERT |
82 DEV_TX_OFFLOAD_QINQ_INSERT |
83 DEV_TX_OFFLOAD_IPV4_CKSUM |
84 DEV_TX_OFFLOAD_UDP_CKSUM |
85 DEV_TX_OFFLOAD_TCP_CKSUM |
86 DEV_TX_OFFLOAD_SCTP_CKSUM |
87 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
88 DEV_TX_OFFLOAD_TCP_TSO |
89 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
90 DEV_TX_OFFLOAD_GRE_TNL_TSO |
91 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
92 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
93 DEV_TX_OFFLOAD_MULTI_SEGS |
94 dev_info->tx_queue_offload_capa;
/* Queues may be set up while the device is running. */
97 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
98 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
/* Switch info ties this representor to its switch domain/port. */
100 dev_info->switch_info.name = ethdev->device->name;
101 dev_info->switch_info.domain_id = rpst->switch_domain_id;
102 dev_info->switch_info.port_id = rpst->port_id;
/* .dev_configure handler: no-op for the representor (no HW to program). */
106 ipn3ke_rpst_dev_configure(__rte_unused struct rte_eth_dev *dev)
/* .dev_start handler: derive the port MAC from the rawdev "base MAC"
 * attribute (last byte overridden with port_id + 1), program it into
 * the XFI MAC (ADDR0 = bytes 0-3, ADDR1 = bytes 4-5), enable Tx/Rx,
 * clear statistics, then refresh link state.
 */
112 ipn3ke_rpst_dev_start(struct rte_eth_dev *dev)
114 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
115 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
116 struct rte_rawdev *rawdev;
119 char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX];
/* Query the base MAC address from the underlying FPGA rawdev. */
123 memset(attr_name, 0, sizeof(attr_name));
124 snprintf(attr_name, IPN3KE_RAWDEV_ATTR_LEN_MAX, "%s",
126 rawdev->dev_ops->attr_get(rawdev, attr_name, &base_mac);
127 rte_ether_addr_copy((struct rte_ether_addr *)&base_mac,
/* Per-port uniqueness: last MAC byte becomes port_id + 1. */
130 rte_ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]);
131 dev->data->mac_addrs->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
132 (uint8_t)rpst->port_id + 1;
134 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
135 /* Set mac address */
136 rte_memcpy(((char *)(&val)),
137 (char *)&dev->data->mac_addrs->addr_bytes[0],
139 (*hw->f_mac_write)(hw,
141 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
144 rte_memcpy(((char *)(&val)),
145 (char *)&dev->data->mac_addrs->addr_bytes[4],
147 (*hw->f_mac_write)(hw,
149 IPN3KE_MAC_PRIMARY_MAC_ADDR1,
153 /* Enable the TX path */
154 ipn3ke_xmac_tx_enable(hw, rpst->port_id, 0);
156 /* Disables source address override */
157 ipn3ke_xmac_smac_ovd_dis(hw, rpst->port_id, 0);
159 /* Enable the RX path */
160 ipn3ke_xmac_rx_enable(hw, rpst->port_id, 0);
162 /* Clear all TX statistics counters */
163 ipn3ke_xmac_tx_clr_stcs(hw, rpst->port_id, 0);
165 /* Clear all RX statistics counters */
166 ipn3ke_xmac_rx_clr_stcs(hw, rpst->port_id, 0);
/* Sync reported link state after bring-up. */
169 ipn3ke_rpst_link_update(dev, 0);
/* .dev_stop handler: quiesce the XFI MAC by disabling Tx and Rx. */
175 ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev)
177 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
178 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
180 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
181 /* Disable the TX path */
182 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
184 /* Disable the RX path */
185 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
/* .dev_close handler: same HW quiesce as stop (disable Tx/Rx paths). */
190 ipn3ke_rpst_dev_close(struct rte_eth_dev *dev)
192 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
193 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
195 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
196 /* Disable the TX path */
197 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
199 /* Disable the RX path */
200 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
205 * Reset PF device only to re-initialize resources in PMD layer
208 ipn3ke_rpst_dev_reset(struct rte_eth_dev *dev)
210 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
211 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
/* HW side of reset is identical to stop/close: disable both paths. */
213 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
214 /* Disable the TX path */
215 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
217 /* Disable the RX path */
218 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
/* Queue start/stop/setup/release handlers: the representor has no real
 * data path, so these are stubs; all parameters are intentionally unused.
 */
225 ipn3ke_rpst_rx_queue_start(__rte_unused struct rte_eth_dev *dev,
226 __rte_unused uint16_t rx_queue_id)
232 ipn3ke_rpst_rx_queue_stop(__rte_unused struct rte_eth_dev *dev,
233 __rte_unused uint16_t rx_queue_id)
239 ipn3ke_rpst_tx_queue_start(__rte_unused struct rte_eth_dev *dev,
240 __rte_unused uint16_t tx_queue_id)
246 ipn3ke_rpst_tx_queue_stop(__rte_unused struct rte_eth_dev *dev,
247 __rte_unused uint16_t tx_queue_id)
253 ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
254 __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
255 __rte_unused unsigned int socket_id,
256 __rte_unused const struct rte_eth_rxconf *rx_conf,
257 __rte_unused struct rte_mempool *mp)
263 ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
268 ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
269 __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
270 __rte_unused unsigned int socket_id,
271 __rte_unused const struct rte_eth_txconf *tx_conf)
277 ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
/* Statistics handlers: stubs — the representor keeps no counters. */
282 ipn3ke_rpst_stats_get(__rte_unused struct rte_eth_dev *ethdev,
283 __rte_unused struct rte_eth_stats *stats)
289 ipn3ke_rpst_xstats_get(__rte_unused struct rte_eth_dev *dev,
290 __rte_unused struct rte_eth_xstat *xstats, __rte_unused unsigned int n)
296 ipn3ke_rpst_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
297 __rte_unused struct rte_eth_xstat_name *xstats_names,
298 __rte_unused unsigned int limit)
304 ipn3ke_rpst_stats_reset(__rte_unused struct rte_eth_dev *ethdev)
/* Fill *link from the rawdev's line-side attributes: bit <port> of
 * "LineSideLinkStatus" gives up/down, and the link-speed attribute is
 * mapped to the ethdev speed enum.
 */
309 ipn3ke_update_link(struct rte_rawdev *rawdev,
310 uint16_t port, struct rte_eth_link *link)
312 uint64_t line_link_bitmap = 0;
313 enum ifpga_rawdev_link_speed link_speed;
315 rawdev->dev_ops->attr_get(rawdev,
316 "LineSideLinkStatus",
317 (uint64_t *)&line_link_bitmap);
319 /* Parse the link status */
320 if ((1 << port) & line_link_bitmap)
321 link->link_status = 1;
323 link->link_status = 0;
325 IPN3KE_AFU_PMD_DEBUG("port is %d\n", port);
326 IPN3KE_AFU_PMD_DEBUG("link->link_status is %d\n", link->link_status);
/* NOTE(review): attr_get() writes 64 bits through (uint64_t *)&link_speed,
 * but link_speed is an enum (int-sized) — verify attr_get only stores the
 * low word here, or widen the local.
 */
328 rawdev->dev_ops->attr_get(rawdev,
330 (uint64_t *)&link_speed);
331 switch (link_speed) {
332 case IFPGA_RAWDEV_LINK_SPEED_10GB:
333 link->link_speed = ETH_SPEED_NUM_10G;
335 case IFPGA_RAWDEV_LINK_SPEED_25GB:
336 link->link_speed = ETH_SPEED_NUM_25G;
339 IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
345 * Set device link up.
348 ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev)
350 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
351 struct rte_eth_dev *pf;
/* Forward the request to the bound i40e PF port (if any) and refresh
 * its link state.
 */
354 if (rpst->i40e_pf_eth) {
355 ret = rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
356 pf = rpst->i40e_pf_eth;
357 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
364 * Set device link down.
367 ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev)
369 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
370 struct rte_eth_dev *pf;
/* Mirror of set_link_up: forward link-down to the bound i40e PF port. */
373 if (rpst->i40e_pf_eth) {
374 ret = rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
375 pf = rpst->i40e_pf_eth;
376 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/* .link_update handler: read current link state from the rawdev and,
 * on a transition (down->up or up->down), cache it in rpst->ori_linfo,
 * publish it via rte_eth_linkstatus_set(), and propagate the change to
 * the bound i40e PF port.
 */
383 ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
384 __rte_unused int wait_to_complete)
386 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
387 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
388 struct rte_rawdev *rawdev;
389 struct rte_eth_link link;
390 struct rte_eth_dev *pf;
392 memset(&link, 0, sizeof(link));
394 link.link_duplex = ETH_LINK_FULL_DUPLEX;
395 link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
396 ETH_LINK_SPEED_FIXED);
399 ipn3ke_update_link(rawdev, rpst->port_id, &link);
/* Down -> up transition. */
401 if (!rpst->ori_linfo.link_status &&
403 IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up\n", rpst->port_id);
404 rpst->ori_linfo.link_status = link.link_status;
405 rpst->ori_linfo.link_speed = link.link_speed;
407 rte_eth_linkstatus_set(ethdev, &link);
409 if (rpst->i40e_pf_eth) {
410 IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up\n",
411 rpst->i40e_pf_eth_port_id);
412 rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
413 pf = rpst->i40e_pf_eth;
414 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/* Up -> down transition. */
416 } else if (rpst->ori_linfo.link_status &&
418 IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down\n",
420 rpst->ori_linfo.link_status = link.link_status;
421 rpst->ori_linfo.link_speed = link.link_speed;
423 rte_eth_linkstatus_set(ethdev, &link);
425 if (rpst->i40e_pf_eth) {
426 IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down\n",
427 rpst->i40e_pf_eth_port_id);
428 rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
429 pf = rpst->i40e_pf_eth;
430 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/* Periodic link poll used by the scan thread; same transition logic as
 * ipn3ke_rpst_link_update() but driven from the rpst handle instead of
 * an ethdev callback.
 */
438 ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
440 struct ipn3ke_hw *hw;
441 struct rte_rawdev *rawdev;
442 struct rte_eth_link link;
443 struct rte_eth_dev *pf;
450 memset(&link, 0, sizeof(link));
452 link.link_duplex = ETH_LINK_FULL_DUPLEX;
453 link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
454 ETH_LINK_SPEED_FIXED);
457 ipn3ke_update_link(rawdev, rpst->port_id, &link);
/* Down -> up transition. */
459 if (!rpst->ori_linfo.link_status &&
461 IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up\n", rpst->port_id);
462 rpst->ori_linfo.link_status = link.link_status;
463 rpst->ori_linfo.link_speed = link.link_speed;
465 rte_eth_linkstatus_set(rpst->ethdev, &link);
467 if (rpst->i40e_pf_eth) {
468 IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up\n",
469 rpst->i40e_pf_eth_port_id);
470 rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
471 pf = rpst->i40e_pf_eth;
472 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/* Up -> down transition. */
474 } else if (rpst->ori_linfo.link_status &&
476 IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down\n", rpst->port_id);
477 rpst->ori_linfo.link_status = link.link_status;
478 rpst->ori_linfo.link_speed = link.link_speed;
480 rte_eth_linkstatus_set(rpst->ethdev, &link);
482 if (rpst->i40e_pf_eth) {
483 IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down\n",
484 rpst->i40e_pf_eth_port_id);
485 rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
486 pf = rpst->i40e_pf_eth;
487 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
/* Scan-thread body: walk every registered representor and poll its link
 * when both the representor and its PF port are started; throttles with
 * rte_delay_us() between sweeps.
 */
495 ipn3ke_rpst_scan_handle_request(__rte_unused void *param)
497 struct ipn3ke_rpst *rpst;
504 TAILQ_FOREACH(rpst, &ipn3ke_rpst_list, next) {
505 if (rpst->i40e_pf_eth &&
506 rpst->ethdev->data->dev_started &&
507 rpst->i40e_pf_eth->data->dev_started)
508 ipn3ke_rpst_link_check(rpst);
/* Short sleep every SCAN_NUM iterations, longer one otherwise. */
510 if (++num > SCAN_NUM)
511 rte_delay_us(1 * MS);
513 rte_delay_us(50 * MS);
/* Start the scan thread when the first representor registers
 * (scan_num == 1) and cancel+join it when the last one unregisters
 * (scan_num == 0). Caller holds ipn3ke_link_notify_list_lk.
 */
523 ipn3ke_rpst_scan_check(void)
527 if (ipn3ke_rpst_scan_num == 1) {
528 ret = pthread_create(&ipn3ke_rpst_scan_thread,
530 ipn3ke_rpst_scan_handle_request, NULL);
532 IPN3KE_AFU_PMD_ERR("Fail to create ipn3ke rpst scan thread");
535 } else if (ipn3ke_rpst_scan_num == 0) {
536 ret = pthread_cancel(ipn3ke_rpst_scan_thread);
538 IPN3KE_AFU_PMD_ERR("Can't cancel the thread");
540 ret = pthread_join(ipn3ke_rpst_scan_thread, NULL);
542 IPN3KE_AFU_PMD_ERR("Can't join the thread");
/* .promiscuous_enable handler: read-modify-write the RX_FRAME_CONTROL
 * register to accept all unicast frames (XFI MAC only).
 * NOTE(review): in this view the enable and disable paths apply the same
 * ALLUCAST mask — verify the hidden intermediate lines actually set the
 * bit here and clear it in the disable path.
 */
551 ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev)
553 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
554 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
555 uint32_t rddata, val;
557 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
558 /* Enable all unicast */
559 (*hw->f_mac_read)(hw,
561 IPN3KE_MAC_RX_FRAME_CONTROL,
565 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
567 (*hw->f_mac_write)(hw,
569 IPN3KE_MAC_RX_FRAME_CONTROL,
/* .promiscuous_disable handler: read-modify-write RX_FRAME_CONTROL to
 * stop accepting all unicast frames (XFI MAC only).
 */
576 ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev)
578 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
579 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
580 uint32_t rddata, val;
582 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
583 /* Disable all unicast */
584 (*hw->f_mac_read)(hw,
586 IPN3KE_MAC_RX_FRAME_CONTROL,
590 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
592 (*hw->f_mac_write)(hw,
594 IPN3KE_MAC_RX_FRAME_CONTROL,
/* .allmulticast_enable handler: shift/mask builds the ALLMCAST bit and
 * merges it into RX_FRAME_CONTROL (XFI MAC only).
 */
601 ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev)
603 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
604 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
605 uint32_t rddata, val;
607 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
608 /* Enable all unicast */
609 (*hw->f_mac_read)(hw,
611 IPN3KE_MAC_RX_FRAME_CONTROL,
615 val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
616 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
618 (*hw->f_mac_write)(hw,
620 IPN3KE_MAC_RX_FRAME_CONTROL,
/* .allmulticast_disable handler: counterpart of allmulticast_enable;
 * clears the ALLMCAST bit in RX_FRAME_CONTROL (XFI MAC only).
 */
627 ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev)
629 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
630 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
631 uint32_t rddata, val;
633 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
634 /* Disable all unicast */
635 (*hw->f_mac_read)(hw,
637 IPN3KE_MAC_RX_FRAME_CONTROL,
641 val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
642 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
644 (*hw->f_mac_write)(hw,
646 IPN3KE_MAC_RX_FRAME_CONTROL,
653 ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev,
654 struct rte_ether_addr *mac_addr)
656 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
657 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
660 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
661 IPN3KE_AFU_PMD_ERR("Tried to set invalid MAC address.");
665 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
666 rte_ether_addr_copy(&mac_addr[0], &rpst->mac_addr);
668 /* Set mac address */
669 rte_memcpy(((char *)(&val)), &mac_addr[0], sizeof(uint32_t));
670 (*hw->f_mac_write)(hw,
672 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
675 rte_memcpy(((char *)(&val)), &mac_addr[4], sizeof(uint16_t));
676 (*hw->f_mac_write)(hw,
678 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
/* .mtu_set handler: validate the MTU range, refuse while either the
 * bound NIC (i40e PF) port or this FPGA port is started, toggle the
 * jumbo-frame offload flag accordingly, then forward the new MTU to the
 * PF port's own mtu_set op.
 */
687 ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
690 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
691 struct rte_eth_dev_data *dev_data = ethdev->data;
692 uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD;
694 /* check if mtu is within the allowed range */
695 if (mtu < RTE_ETHER_MIN_MTU ||
696 frame_size > IPN3KE_MAC_FRAME_SIZE_MAX)
699 /* mtu setting is forbidden if port is start */
700 /* make sure NIC port is stopped */
701 if (rpst->i40e_pf_eth && rpst->i40e_pf_eth->data->dev_started) {
702 IPN3KE_AFU_PMD_ERR("NIC port %d must "
703 "be stopped before configuration",
704 rpst->i40e_pf_eth->data->port_id)
707 /* mtu setting is forbidden if port is start */
708 if (dev_data->dev_started) {
709 IPN3KE_AFU_PMD_ERR("FPGA port %d must "
710 "be stopped before configuration",
/* Frames above RTE_ETHER_MAX_LEN require the jumbo offload bit. */
715 if (frame_size > RTE_ETHER_MAX_LEN)
716 dev_data->dev_conf.rxmode.offloads |=
717 (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
719 dev_data->dev_conf.rxmode.offloads &=
720 (uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
722 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
724 if (rpst->i40e_pf_eth) {
725 ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
728 rpst->i40e_pf_eth->data->mtu = mtu;
/* .filter_ctrl handler: for RTE_ETH_FILTER_GENERIC + GET, hand back the
 * ipn3ke flow ops; other filter types are delegated to the bound i40e
 * PF port's filter_ctrl when present.
 */
735 ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
736 enum rte_filter_type filter_type, enum rte_filter_op filter_op,
740 struct ipn3ke_hw *hw;
741 struct ipn3ke_rpst *rpst;
746 hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
747 rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
750 switch (filter_type) {
751 case RTE_ETH_FILTER_GENERIC:
752 if (filter_op != RTE_ETH_FILTER_GET)
754 *(const void **)arg = &ipn3ke_flow_ops;
757 IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
762 else if (rpst->i40e_pf_eth)
763 (*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev,
/* ethdev ops table for the representor; stats_reset doubles as
 * xstats_reset since neither keeps per-xstat state.
 */
773 static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
774 .dev_infos_get = ipn3ke_rpst_dev_infos_get,
776 .dev_configure = ipn3ke_rpst_dev_configure,
777 .dev_start = ipn3ke_rpst_dev_start,
778 .dev_stop = ipn3ke_rpst_dev_stop,
779 .dev_close = ipn3ke_rpst_dev_close,
780 .dev_reset = ipn3ke_rpst_dev_reset,
782 .stats_get = ipn3ke_rpst_stats_get,
783 .xstats_get = ipn3ke_rpst_xstats_get,
784 .xstats_get_names = ipn3ke_rpst_xstats_get_names,
785 .stats_reset = ipn3ke_rpst_stats_reset,
786 .xstats_reset = ipn3ke_rpst_stats_reset,
788 .filter_ctrl = ipn3ke_afu_filter_ctrl,
790 .rx_queue_start = ipn3ke_rpst_rx_queue_start,
791 .rx_queue_stop = ipn3ke_rpst_rx_queue_stop,
792 .tx_queue_start = ipn3ke_rpst_tx_queue_start,
793 .tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
794 .rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
795 .rx_queue_release = ipn3ke_rpst_rx_queue_release,
796 .tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
797 .tx_queue_release = ipn3ke_rpst_tx_queue_release,
799 .dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
800 .dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
801 .link_update = ipn3ke_rpst_link_update,
803 .promiscuous_enable = ipn3ke_rpst_promiscuous_enable,
804 .promiscuous_disable = ipn3ke_rpst_promiscuous_disable,
805 .allmulticast_enable = ipn3ke_rpst_allmulticast_enable,
806 .allmulticast_disable = ipn3ke_rpst_allmulticast_disable,
807 .mac_addr_set = ipn3ke_rpst_mac_addr_set,
808 .mtu_set = ipn3ke_rpst_mtu_set,
810 .tm_ops_get = ipn3ke_tm_ops_get,
/* Stub Rx burst: representor has no data path; always receives 0 pkts. */
813 static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q,
814 __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
/* Stub Tx burst: representor has no data path; always transmits 0 pkts. */
820 ipn3ke_rpst_xmit_pkts(__rte_unused void *tx_queue,
821 __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
827 ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
829 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
830 struct ipn3ke_rpst *representor_param =
831 (struct ipn3ke_rpst *)init_params;
833 if (representor_param->port_id >= representor_param->hw->port_num)
836 rpst->ethdev = ethdev;
837 rpst->switch_domain_id = representor_param->switch_domain_id;
838 rpst->port_id = representor_param->port_id;
839 rpst->hw = representor_param->hw;
840 rpst->i40e_pf_eth = NULL;
841 rpst->i40e_pf_eth_port_id = 0xFFFF;
843 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", RTE_ETHER_ADDR_LEN, 0);
844 if (!ethdev->data->mac_addrs) {
845 IPN3KE_AFU_PMD_ERR("Failed to "
846 "allocated memory for storing mac address");
850 if (rpst->hw->tm_hw_enable)
851 ipn3ke_tm_init(rpst);
853 /* Set representor device ops */
854 ethdev->dev_ops = &ipn3ke_rpst_dev_ops;
856 /* No data-path, but need stub Rx/Tx functions to avoid crash
857 * when testing with the likes of testpmd.
859 ethdev->rx_pkt_burst = ipn3ke_rpst_recv_pkts;
860 ethdev->tx_pkt_burst = ipn3ke_rpst_xmit_pkts;
862 ethdev->data->nb_rx_queues = 1;
863 ethdev->data->nb_tx_queues = 1;
865 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke_afu_representor",
868 if (!ethdev->data->mac_addrs) {
869 IPN3KE_AFU_PMD_ERR("Failed to "
870 "allocated memory for storing mac address");
874 ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
876 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
877 TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
878 ipn3ke_rpst_scan_num++;
879 ipn3ke_rpst_scan_check();
880 rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);
/* Representor teardown: unregister from the scan list under the lock;
 * ipn3ke_rpst_scan_check() stops the scan thread when the count hits 0.
 */
886 ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev)
888 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
890 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
891 TAILQ_REMOVE(&ipn3ke_rpst_list, rpst, next);
892 ipn3ke_rpst_scan_num--;
893 ipn3ke_rpst_scan_check();
894 rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);