/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>
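
/* dev_infos_get callback: report static device capabilities and limits
 * (queue counts, MTU range, offload capabilities, descriptor limits) to
 * the application.
 */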
int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint32_t max_rx_pktlen;

	/* Advertised max Rx length includes CRC but leaves room for
	 * VTAG insertion actions in NPC.
	 */
	max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
			 CNXK_NIX_MAX_VTAG_ACT_SIZE);

	devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
	devinfo->max_rx_pktlen = max_rx_pktlen;
	devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_mac_addrs = dev->max_mac_entries;
	devinfo->max_vfs = pci_dev->max_vfs;
	devinfo->max_mtu = devinfo->max_rx_pktlen -
			   (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
	devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

	devinfo->rx_offload_capa = dev->rx_offload_capa;
	devinfo->tx_offload_capa = dev->tx_offload_capa;
	devinfo->rx_queue_offload_capa = 0;
	devinfo->tx_queue_offload_capa = 0;

	devinfo->reta_size = dev->nix.reta_sz;
	devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
	devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

	devinfo->default_rxconf = (struct rte_eth_rxconf){
		.rx_drop_en = 0,
		.offloads = 0,
	};

	devinfo->default_txconf = (struct rte_eth_txconf){
		.offloads = 0,
	};

	devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
		.ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
	};

	devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
		.nb_max = UINT16_MAX,
		.nb_min = CNXK_NIX_RX_MIN_DESC,
		.nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
		.nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
		.nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
	};
	devinfo->rx_desc_lim.nb_max =
		RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
				    CNXK_NIX_RX_MIN_DESC_ALIGN);

	devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
		.nb_max = UINT16_MAX,
		.nb_min = 1,
		.nb_align = 1,
		.nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
		.nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
	};

	devinfo->speed_capa = dev->speed_capa;
	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	return 0;
}
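
/* rx_burst_mode_get callback: render the active Rx burst variant
 * (vector vs. scalar) and the enabled Rx offloads as a human-readable
 * string for rte_eth_rx_burst_mode_get().
 */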
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			   struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
		{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
		{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
		{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
		{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
		{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
		{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
		{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
		{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
		{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
		{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
		{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
		{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
		{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
		{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
		{DEV_RX_OFFLOAD_SECURITY, " Security,"},
		{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
		{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
		{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
		{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
						 "Scalar, Rx Offloads:"
	};
	uint32_t i;

	PLT_SET_USED(queue_id);

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;

	bytes += rc;

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (dev->rx_offloads & rx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 rx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;

			bytes += rc;
		}
	}

done:
	return 0;
}
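
/* tx_burst_mode_get callback: Tx-side counterpart of the above, reporting
 * the Tx burst variant and the enabled Tx offloads.
 */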
int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			   struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
		{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
		{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
		{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
		{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
		{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
		{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
		{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
		{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
		{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
		{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
		{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
		{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
		{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
		{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
		{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
		{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
		{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
		{DEV_TX_OFFLOAD_SECURITY, " Security,"},
		{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
		{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
		{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
		{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
						 "Scalar, Tx Offloads:"
	};
	uint32_t i;

	PLT_SET_USED(queue_id);

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;

	bytes += rc;

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (dev->tx_offloads & tx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 tx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;

			bytes += rc;
		}
	}

done:
	return 0;
}
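
/* flow_ctrl_get callback: translate the ROC flow-control mode into the
 * ethdev enum; mode_map relies on roc_nix_fc_mode_get() returning an
 * index in NONE/RX/TX/FULL order.
 */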
int
cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	enum rte_eth_fc_mode mode_map[] = {
		RTE_FC_NONE, RTE_FC_RX_PAUSE,
		RTE_FC_TX_PAUSE, RTE_FC_FULL
	};
	struct roc_nix *nix = &dev->nix;
	int mode;

	mode = roc_nix_fc_mode_get(nix);
	if (mode < 0)
		return mode;

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	fc_conf->mode = mode_map[mode];
	return 0;
}
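
/* Helper to enable/disable backpressure on one Rx completion queue. The
 * CQ drop threshold is passed along so that backpressure asserts before
 * the CQ overflows.
 */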
static int
nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
{
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_fc_cfg fc_cfg;
	struct roc_nix_cq *cq;

	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	cq = &dev->cqs[qid];
	fc_cfg.cq_cfg_valid = true;
	fc_cfg.cq_cfg.enable = enable;
	fc_cfg.cq_cfg.rq = qid;
	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;

	return roc_nix_fc_config_set(nix, &fc_cfg);
}
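
/* flow_ctrl_set callback: only the pause mode itself can be changed;
 * water marks, pause time, MAC control frame forwarding and autoneg are
 * rejected. Not supported on VF/SDP interfaces.
 */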
int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	enum roc_nix_fc_mode mode_map[] = {
		ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
		ROC_NIX_FC_TX, ROC_NIX_FC_FULL
	};
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct roc_nix *nix = &dev->nix;
	uint8_t rx_pause, tx_pause;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix)) {
		plt_err("Flow control configuration is not allowed on VFs");
		return -ENOTSUP;
	}

	if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
	    fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
		plt_info("Only MODE configuration is supported");
		return -EINVAL;
	}

	if (fc_conf->mode == fc->mode)
		return 0;

	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
		   (fc_conf->mode == RTE_FC_RX_PAUSE);
	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
		   (fc_conf->mode == RTE_FC_TX_PAUSE);

	/* Check if TX pause frame is already enabled or not */
	if (fc->tx_pause ^ tx_pause) {
		if (roc_model_is_cn96_ax() && data->dev_started) {
			/* On Ax, CQ should be in disabled state
			 * while setting flow control configuration.
			 */
			plt_info("Stop the port=%d for setting flow control",
				 data->port_id);
			return 0;
		}

		for (i = 0; i < data->nb_rx_queues; i++) {
			rc = nix_fc_cq_config_set(dev, i, tx_pause);
			if (rc)
				return rc;
		}
	}

	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
	if (rc)
		return rc;

	fc->rx_pause = rx_pause;
	fc->tx_pause = tx_pause;
	fc->mode = fc_conf->mode;

	return rc;
}
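
/* mac_addr_set callback: program the default MAC in the NPC classifier
 * first, then in CGX for PFs; a CGX failure rolls the NPC entry back to
 * the previous address so both levels stay in sync.
 */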
int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Update mac address at NPC */
	rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
	if (rc)
		goto exit;

	/* Update mac address at CGX for PFs only */
	if (!roc_nix_is_vf_or_sdp(nix)) {
		rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
		if (rc) {
			/* Rollback to previous mac address */
			roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
			goto exit;
		}
	}

	/* Update mac address to cnxk ethernet device */
	rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
exit:
	return rc;
}

int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
		      uint32_t index, uint32_t pool)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	PLT_SET_USED(index);
	PLT_SET_USED(pool);

	rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
	if (rc < 0) {
		plt_err("Failed to add mac address, rc=%d", rc);
		return rc;
	}

	/* Enable promiscuous mode at NIX level */
	roc_nix_npc_promisc_ena_dis(nix, true);
	dev->dmac_filter_enable = true;
	eth_dev->data->promiscuous = false;
	return 0;
}

void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_mac_addr_del(nix, index);
	if (rc)
		plt_err("Failed to delete mac address, rc=%d", rc);
}
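
/* mtu_set callback: the ethdev MTU excludes L2 overhead, while the
 * hardware is programmed with a max frame length. CNXK_NIX_L2_OVERHEAD
 * (assumed here to be Ethernet header + CRC + two VLAN tags, i.e.
 * 14 + 4 + 8 = 26 bytes) converts between the two, so an MTU of 1500
 * maps to a 1526-byte frame.
 */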
int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;
	uint32_t buffsz;

	/* Check if MTU is within the allowed range */
	if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
		plt_err("MTU is lesser than minimum");
		goto exit;
	}

	if ((frame_size - RTE_ETHER_CRC_LEN) >
	    ((uint32_t)roc_nix_max_pkt_len(nix))) {
		plt_err("MTU is greater than maximum");
		goto exit;
	}

	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

	/* Refuse MTU that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (data->dev_started && frame_size > buffsz &&
	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
		plt_err("Scatter offload is not enabled for mtu");
		goto exit;
	}

	/* Check <seg size> * <max_seg> >= max_frame */
	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
		plt_err("Greater than maximum supported packet length");
		goto exit;
	}

	frame_size -= RTE_ETHER_CRC_LEN;

	/* Update mtu on Tx */
	rc = roc_nix_mac_mtu_set(nix, frame_size);
	if (rc) {
		plt_err("Failed to set MTU, rc=%d", rc);
		goto exit;
	}

	/* Sync same frame size on Rx */
	rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
	if (rc) {
		/* Rollback to older mtu */
		roc_nix_mac_mtu_set(nix,
				    old_frame_size - RTE_ETHER_CRC_LEN);
		plt_err("Failed to set max Rx frame length, rc=%d", rc);
		goto exit;
	}

	frame_size += RTE_ETHER_CRC_LEN;

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* Update max_rx_pkt_len */
	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
exit:
	return rc;
}
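
/* Promiscuous mode is toggled at two levels, the NPC classifier and the
 * MAC (CGX); if the second step fails the first is undone so the levels
 * never disagree. VF and SDP interfaces have no CGX and return early.
 */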
int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc = 0;

	if (roc_nix_is_vf_or_sdp(nix))
		return rc;

	rc = roc_nix_npc_promisc_ena_dis(nix, true);
	if (rc) {
		plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		return rc;
	}

	rc = roc_nix_mac_promisc_mode_enable(nix, true);
	if (rc) {
		plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		roc_nix_npc_promisc_ena_dis(nix, false);
		return rc;
	}
	return 0;
}

int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc = 0;

	if (roc_nix_is_vf_or_sdp(nix))
		return rc;

	rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
	if (rc) {
		plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		return rc;
	}

	rc = roc_nix_mac_promisc_mode_enable(nix, false);
	if (rc) {
		plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
		return rc;
	}

	dev->dmac_filter_enable = false;
	return 0;
}

int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	return roc_nix_npc_mcast_config(&dev->nix, true, false);
}

int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	return roc_nix_npc_mcast_config(&dev->nix, false,
					eth_dev->data->promiscuous);
}
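
/* Link up/down callbacks: not supported on VF/SDP. Tx queues are started
 * only after the MAC link comes up and are stopped before it is taken
 * down, so packets are not queued toward a down link.
 */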
int
cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix))
		return -ENOTSUP;

	rc = roc_nix_mac_link_state_set(nix, true);
	if (rc)
		goto exit;

	/* Start tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_start(eth_dev, i);
		if (rc)
			goto exit;
	}
exit:
	return rc;
}

int
cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix))
		return -ENOTSUP;

	/* Stop tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_stop(eth_dev, i);
		if (rc)
			goto exit;
	}

	rc = roc_nix_mac_link_state_set(nix, false);
exit:
	return rc;
}

int
cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
			 struct rte_eth_dev_module_info *modinfo)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_eeprom_info eeprom_info = {0};
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
	if (rc)
		return rc;

	modinfo->type = eeprom_info.sff_id;
	modinfo->eeprom_len = ROC_NIX_EEPROM_SIZE;
	return 0;
}

int
cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			   struct rte_dev_eeprom_info *info)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_eeprom_info eeprom_info = {0};
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;

	if (!info->data || !info->length ||
	    (info->offset + info->length > ROC_NIX_EEPROM_SIZE))
		return rc;

	rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
	if (rc)
		return rc;

	rte_memcpy(info->data, eeprom_info.buf + info->offset, info->length);
	return 0;
}
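
/* Rx queue interrupt enable/disable: thin wrappers over the ROC layer
 * backing rte_eth_dev_rx_intr_enable() and rte_eth_dev_rx_intr_disable().
 */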
int
cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	roc_nix_rx_queue_intr_enable(&dev->nix, rx_queue_id);
	return 0;
}

int
cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
			       uint16_t rx_queue_id)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	roc_nix_rx_queue_intr_disable(&dev->nix, rx_queue_id);
	return 0;
}

int
cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
	RTE_SET_USED(eth_dev);

	if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
		return 0;

	return -ENOTSUP;
}
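
/* Queue info callbacks: replay the configuration captured in the
 * software-private queue area (cnxk_eth_rxq_sp/cnxk_eth_txq_sp) at
 * queue setup time.
 */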
void
cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
		      struct rte_eth_rxq_info *qinfo)
{
	void *rxq = eth_dev->data->rx_queues[qid];
	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->mp = rxq_sp->qconf.mp;
	qinfo->scattered_rx = eth_dev->data->scattered_rx;
	qinfo->nb_desc = rxq_sp->qconf.nb_desc;

	memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
}

void
cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
		      struct rte_eth_txq_info *qinfo)
{
	void *txq = eth_dev->data->tx_queues[qid];
	struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->nb_desc = txq_sp->qconf.nb_desc;

	memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}