/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

/* Defined at the end of this file */
static int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
                                            struct cnxk_pfc_cfg *conf);

int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int max_rx_pktlen;
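
        /* Max Rx packet length: HW max frame size plus CRC, minus the
         * room NPC may consume for VTAG insert actions.
         */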
        max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
                         CNXK_NIX_MAX_VTAG_ACT_SIZE);

        devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
        devinfo->max_rx_pktlen = max_rx_pktlen;
        devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_mac_addrs = dev->max_mac_entries;
        devinfo->max_vfs = pci_dev->max_vfs;
        devinfo->max_mtu = devinfo->max_rx_pktlen -
                           (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
        devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

        devinfo->rx_offload_capa = dev->rx_offload_capa;
        devinfo->tx_offload_capa = dev->tx_offload_capa;
        devinfo->rx_queue_offload_capa = 0;
        devinfo->tx_queue_offload_capa = 0;

        devinfo->reta_size = dev->nix.reta_sz;
        devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
        devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

        devinfo->default_rxconf = (struct rte_eth_rxconf){
                .rx_drop_en = 0,
                .offloads = 0,
        };

        devinfo->default_txconf = (struct rte_eth_txconf){
                .offloads = 0,
        };

        devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
                .ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
        };

        devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = CNXK_NIX_RX_MIN_DESC,
                .nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
                .nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
        };
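        /* Round nb_max down to a multiple of the minimum descriptor
         * alignment so any advertised ring size is programmable.
         */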
        devinfo->rx_desc_lim.nb_max =
                RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
                                    CNXK_NIX_RX_MIN_DESC_ALIGN);

        devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = 1,
                .nb_align = 1,
                .nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
        };

        devinfo->speed_capa = dev->speed_capa;
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        return 0;
}
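
/* Compose a human-readable Rx burst mode string: scalar or Neon vector
 * mode first, then the name of every Rx offload enabled on the port.
 */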
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
                {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
                {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
                {RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
                {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
                {RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
                {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
                {RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
                {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
                {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
                {RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
                {RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
                {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
                {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
                                                 "Scalar, Rx Offloads:"
                                                };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Rx offload info */
        for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
                if (dev->rx_offloads & rx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         rx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}
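
/* Tx counterpart of the Rx handler above: report scalar/vector mode and
 * the Tx offloads active on the port.
 */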
int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
                {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
                {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
                {RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
                {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
                {RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
                {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
                {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
                {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
                {RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
                {RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
                                                 "Scalar, Tx Offloads:"
                                                };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Tx offload info */
        for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
                if (dev->tx_offloads & tx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         tx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}
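
/* Translate the flow control mode reported by ROC into the ethdev
 * rte_eth_fc_mode value via mode_map.
 */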
int
cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum rte_eth_fc_mode mode_map[] = {
                RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
                RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
        };
        struct roc_nix *nix = &dev->nix;
        int mode;

        mode = roc_nix_fc_mode_get(nix);
        if (mode < 0)
                return mode;

        memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
        fc_conf->mode = mode_map[mode];
        return 0;
}
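
/* Enable or disable CQ-level backpressure for one Rx queue. The drop
 * threshold is taken from the CQ context configured at queue setup.
 */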
static int
nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
{
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_fc_cfg fc_cfg;
        struct roc_nix_cq *cq;

        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        cq = &dev->cqs[qid];

        fc_cfg.type = ROC_NIX_FC_CQ_CFG;
        fc_cfg.cq_cfg.enable = enable;
        /* Map all CQs to last channel */
        fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
        fc_cfg.cq_cfg.rq = qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;

        return roc_nix_fc_config_set(nix, &fc_cfg);
}
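
/* 802.3x link flow control. Only the pause mode is configurable here;
 * water marks, pause time, autoneg and MAC control frame forwarding
 * are rejected below.
 */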
int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum roc_nix_fc_mode mode_map[] = {
                ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
                ROC_NIX_FC_TX, ROC_NIX_FC_FULL
        };
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_fc_cfg *fc = &dev->fc_cfg;
        struct roc_nix *nix = &dev->nix;
        struct cnxk_eth_rxq_sp *rxq;
        struct cnxk_eth_txq_sp *txq;
        uint8_t rx_pause, tx_pause;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
                plt_err("Flow control configuration is not allowed on VFs");
                return -ENOTSUP;
        }

        if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
            fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
                plt_info("Only MODE configuration is supported");
                return -EINVAL;
        }

        if (fc_conf->mode == fc->mode)
                return 0;

        rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
                   (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
        tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
                   (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);

        /* Check if TX pause frame is already enabled or not */
        if (fc->tx_pause ^ tx_pause) {
                if (roc_model_is_cn96_ax() && data->dev_started) {
                        /* On Ax, CQ should be in disabled state
                         * while setting flow control configuration.
                         */
                        plt_info("Stop the port=%d for setting flow control",
                                 data->port_id);
                        return 0;
                }

                for (i = 0; i < data->nb_rx_queues; i++) {
                        struct roc_nix_fc_cfg fc_cfg;

                        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
                        rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
                              1;
                        rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
                        if (rc)
                                return rc;
                }
        }

        /* Check if RX pause frame is enabled or not */
        if (fc->rx_pause ^ rx_pause) {
                for (i = 0; i < data->nb_tx_queues; i++) {
                        struct roc_nix_fc_cfg fc_cfg;

                        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
                        txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
                              1;
                        fc_cfg.type = ROC_NIX_FC_TM_CFG;
                        fc_cfg.tm_cfg.sq = txq->qid;
                        fc_cfg.tm_cfg.enable = !!rx_pause;
                        rc = roc_nix_fc_config_set(nix, &fc_cfg);
                        if (rc)
                                return rc;
                }
        }

        rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
        if (rc)
                return rc;

        fc->rx_pause = rx_pause;
        fc->tx_pause = tx_pause;
        fc->mode = fc_conf->mode;

        return rc;
}
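
/* PFC capability report: one traffic class per NIX channel, and both
 * Rx and Tx pause are supported.
 */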
int
cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
                                           struct rte_eth_pfc_queue_info *pfc_info)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
        pfc_info->mode_capa = RTE_ETH_FC_FULL;
        return 0;
}

int
cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
                                         struct rte_eth_pfc_queue_conf *pfc_conf)
{
        struct cnxk_pfc_cfg conf;
        int rc;

        memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));

        conf.fc_cfg.mode = pfc_conf->mode;

        conf.pause_time = pfc_conf->tx_pause.pause_time;
        conf.rx_tc = pfc_conf->tx_pause.tc;
        conf.rx_qid = pfc_conf->tx_pause.rx_qid;

        conf.tx_tc = pfc_conf->rx_pause.tc;
        conf.tx_qid = pfc_conf->rx_pause.tx_qid;

        rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
        if (rc)
                return rc;

        return 0;
}

int
cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
                      const struct rte_flow_ops **ops)
{
        RTE_SET_USED(eth_dev);

        *ops = &cnxk_flow_ops;
        return 0;
}

int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        /* Update mac address at NPC */
        rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
        if (rc)
                goto exit;

        /* Update mac address at CGX for PFs only */
        if (!roc_nix_is_vf_or_sdp(nix)) {
                rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
                if (rc) {
                        /* Rollback to previous mac address */
                        roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
                        goto exit;
                }
        }

        /* Update mac address to cnxk ethernet device */
        rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);

exit:
        return rc;
}
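
/* Note: installing a DMAC filter switches the NIX to promisc so that
 * the NPC DMAC filter entries make the final accept/drop decision.
 */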
int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
                      uint32_t index, uint32_t pool)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        PLT_SET_USED(index);
        PLT_SET_USED(pool);

        rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
        if (rc < 0) {
                plt_err("Failed to add mac address, rc=%d", rc);
                return rc;
        }

        /* Enable promiscuous mode at NIX level */
        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;
        dev->dmac_filter_count++;

        return 0;
}

void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_mac_addr_del(nix, index);
        if (rc)
                plt_err("Failed to delete mac address, rc=%d", rc);

        dev->dmac_filter_count--;
}
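
/* MTU update: frame size is MTU plus L2 overhead (plus the timesync
 * offset when PTP is enabled). Tx MTU and Rx max frame length are kept
 * in sync, with rollback if the Rx side fails.
 */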
int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;
        uint32_t buffsz;

        frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;

        /* Check if MTU is within the allowed range */
        if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
                plt_err("MTU is lesser than minimum");
                goto exit;
        }

        if ((frame_size - RTE_ETHER_CRC_LEN) >
            ((uint32_t)roc_nix_max_pkt_len(nix))) {
                plt_err("MTU is greater than maximum");
                goto exit;
        }

        buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
        old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

        /* Refuse MTU that requires the support of scattered packets
         * when this feature has not been enabled before.
         */
        if (data->dev_started && frame_size > buffsz &&
            !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
                plt_err("Scatter offload is not enabled for mtu");
                goto exit;
        }

        /* Check <seg size> * <max_seg> >= max_frame */
        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
            frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
                plt_err("Greater than maximum supported packet length");
                goto exit;
        }

        frame_size -= RTE_ETHER_CRC_LEN;

        /* Update mtu on Tx */
        rc = roc_nix_mac_mtu_set(nix, frame_size);
        if (rc) {
                plt_err("Failed to set MTU, rc=%d", rc);
                goto exit;
        }

        /* Sync same frame size on Rx */
        rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
        if (rc) {
                /* Rollback to older mtu */
                roc_nix_mac_mtu_set(nix,
                                    old_frame_size - RTE_ETHER_CRC_LEN);
                plt_err("Failed to set max Rx frame length, rc=%d", rc);
                goto exit;
        }

exit:
        return rc;
}

int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, false);
                return rc;
        }

        return 0;
}

int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, false);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
                return rc;
        }

        dev->dmac_filter_enable = false;
        return 0;
}

int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, true,
                                        eth_dev->data->promiscuous);
}

int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, false,
                                        eth_dev->data->promiscuous);
}

int
cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        rc = roc_nix_mac_link_state_set(nix, true);
        if (rc)
                goto exit;

        /* Start tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_start(eth_dev, i);
                if (rc)
                        goto exit;
        }

exit:
        return rc;
}

int
cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        /* Stop tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_stop(eth_dev, i);
                if (rc)
                        return rc;
        }

        rc = roc_nix_mac_link_state_set(nix, false);

        return rc;
}

int
cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
                         struct rte_eth_dev_module_info *modinfo)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        modinfo->type = eeprom_info.sff_id;
        modinfo->eeprom_len = ROC_NIX_EEPROM_SIZE;
        return 0;
}

int
cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
                           struct rte_dev_eeprom_info *info)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;

        if (!info->data || !info->length ||
            (info->offset + info->length > ROC_NIX_EEPROM_SIZE))
                return rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        rte_memcpy(info->data, eeprom_info.buf + info->offset, info->length);
        return 0;
}

int
cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_enable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
                               uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_disable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
        RTE_SET_USED(eth_dev);

        if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
                return 0;

        return -ENOTSUP;
}

int
cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
                        size_t fw_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const char *str = roc_npc_profile_name_get(&dev->npc);
        uint32_t size = strlen(str) + 1;

        if (fw_size > size)
                fw_size = size;

        rte_strlcpy(fw_version, str, fw_size);

        if (fw_size < size)
                return size;

        return 0;
}

void
cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_rxq_info *qinfo)
{
        void *rxq = eth_dev->data->rx_queues[qid];
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->mp = rxq_sp->qconf.mp;
        qinfo->scattered_rx = eth_dev->data->scattered_rx;
        qinfo->nb_desc = rxq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
}

void
cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_txq_info *qinfo)
{
        void *txq = eth_dev->data->tx_queues[qid];
        struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->nb_desc = txq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}

uint32_t
cnxk_nix_rx_queue_count(void *rxq)
{
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        struct roc_nix *nix = &rxq_sp->dev->nix;
        uint32_t head, tail;

        roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
        return (tail - head) % (rxq_sp->qconf.nb_desc);
}
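
/* E.g. head=10, tail=14: offsets 10..14 are in flight, i.e. completed
 * packets for a CQ (Rx) or still-queued descriptors for an SQ (Tx).
 * Once the ring wraps (say head=250, tail=4 on a 256-entry ring), the
 * occupied range is 250..255 plus 0..4, which the second clause below
 * handles.
 */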
static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
{
        /* Check given offset(queue index) has packet filled/xmit by HW
         * in case of Rx or Tx.
         * Also, checks for wrap around case.
         */
        return ((tail > head && offset <= tail && offset >= head) ||
                (head > tail && (offset >= head || offset <= tail))) ?
                       is_rx :
                       !is_rx;
}

int
cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset)
{
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        struct roc_nix *nix = &rxq_sp->dev->nix;
        uint32_t head, tail;

        if (rxq_sp->qconf.nb_desc <= offset)
                return -EINVAL;

        roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);

        if (nix_offset_has_packet(head, tail, offset, 1))
                return RTE_ETH_RX_DESC_DONE;

        return RTE_ETH_RX_DESC_AVAIL;
}

int
cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset)
{
        struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
        struct roc_nix *nix = &txq_sp->dev->nix;
        uint32_t head = 0, tail = 0;

        if (txq_sp->qconf.nb_desc <= offset)
                return -EINVAL;

        roc_nix_sq_head_tail_get(nix, txq_sp->qid, &head, &tail);

        if (nix_offset_has_packet(head, tail, offset, 0))
                return RTE_ETH_TX_DESC_DONE;

        return RTE_ETH_TX_DESC_FULL;
}

/* It is a NOP for cnxk as HW frees the buffer on xmit */
int
cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
        RTE_SET_USED(txq);
        RTE_SET_USED(free_cnt);

        return 0;
}

int
cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint64_t *data = regs->data;
        int rc = -ENOTSUP;

        if (data == NULL) {
                rc = roc_nix_lf_get_reg_count(nix);
                if (rc > 0) {
                        regs->length = rc;
                        regs->width = 8;
                        rc = 0;
                }
                return rc;
        }

        if (!regs->length ||
            regs->length == (uint32_t)roc_nix_lf_get_reg_count(nix))
                return roc_nix_lf_reg_dump(nix, data);

        return rc;
}
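
/* RETA entries arrive in groups of RTE_ETH_RETA_GROUP_SIZE (64); each
 * group's mask selects which of its slots are meant to be applied.
 */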
int
cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
                     struct rte_eth_rss_reta_entry64 *reta_conf,
                     uint16_t reta_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint16_t reta[ROC_NIX_RSS_RETA_MAX];
        struct roc_nix *nix = &dev->nix;
        int i, j, rc = -EINVAL, idx = 0;

        if (reta_size != dev->nix.reta_sz) {
                plt_err("Size of hash lookup table configured (%d) does not "
                        "match the number the hardware can support (%d)",
                        reta_size, dev->nix.reta_sz);
                goto fail;
        }

        /* Copy RETA table */
        for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta[idx] = reta_conf[i].reta[j];
                        idx++;
                }
        }

        return roc_nix_rss_reta_set(nix, 0, reta);

fail:
        return rc;
}

int
cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint16_t reta[ROC_NIX_RSS_RETA_MAX];
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL, i, j, idx = 0;

        if (reta_size != dev->nix.reta_sz) {
                plt_err("Size of hash lookup table configured (%d) does not "
                        "match the number the hardware can support (%d)",
                        reta_size, dev->nix.reta_sz);
                goto fail;
        }

        rc = roc_nix_rss_reta_get(nix, 0, reta);
        if (rc)
                goto fail;

        /* Copy RETA table */
        for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = reta[idx];
                        idx++;
                }
        }

        return 0;

fail:
        return rc;
}
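
/* The ethdev rss_hf bits are translated into a NIX flowkey config, and
 * RTE_ETH_RSS_LEVEL() picks the header level (outer vs inner) to hash.
 */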
int
cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_conf *rss_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint8_t rss_hash_level;
        uint32_t flowkey_cfg;
        int rc = -EINVAL;
        uint8_t alg_idx;

        if (rss_conf->rss_key && rss_conf->rss_key_len != ROC_NIX_RSS_KEY_LEN) {
                plt_err("Hash key size mismatch %d vs %d",
                        rss_conf->rss_key_len, ROC_NIX_RSS_KEY_LEN);
                goto fail;
        }

        if (rss_conf->rss_key)
                roc_nix_rss_key_set(nix, rss_conf->rss_key);

        rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
        if (rss_hash_level)
                rss_hash_level -= 1;

        flowkey_cfg =
                cnxk_rss_ethdev_to_nix(dev, rss_conf->rss_hf, rss_hash_level);

        rc = roc_nix_rss_flowkey_set(nix, &alg_idx, flowkey_cfg,
                                     ROC_NIX_RSS_GROUP_DEFAULT,
                                     ROC_NIX_RSS_MCAM_IDX_DEFAULT);
        if (rc) {
                plt_err("Failed to set RSS hash function rc=%d", rc);
                return rc;
        }

        dev->ethdev_rss_hf = rss_conf->rss_hf;

        return 0;

fail:
        return rc;
}

int
cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
                           struct rte_eth_rss_conf *rss_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        if (rss_conf->rss_key)
                roc_nix_rss_key_get(&dev->nix, rss_conf->rss_key);

        rss_conf->rss_key_len = ROC_NIX_RSS_KEY_LEN;
        rss_conf->rss_hf = dev->ethdev_rss_hf;

        return 0;
}
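
/* The multicast list is applied wholesale: flush every multicast DMAC
 * filter first, then install the new set, keeping ethdev's mac_addrs
 * array and the DMAC filter count in sync.
 */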
int
cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
                                struct rte_ether_addr *mc_addr_set,
                                uint32_t nb_mc_addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct rte_ether_addr null_mac_addr;
        struct roc_nix *nix = &dev->nix;
        int rc, index;
        uint32_t i;

        memset(&null_mac_addr, 0, sizeof(null_mac_addr));

        /* All configured multicast filters should be flushed first */
        for (i = 0; i < dev->max_mac_entries; i++) {
                if (rte_is_multicast_ether_addr(&data->mac_addrs[i])) {
                        rc = roc_nix_mac_addr_del(nix, i);
                        if (rc) {
                                plt_err("Failed to flush mcast address, rc=%d",
                                        rc);
                                return rc;
                        }

                        dev->dmac_filter_count--;
                        /* Update address in NIC data structure */
                        rte_ether_addr_copy(&null_mac_addr,
                                            &data->mac_addrs[i]);
                }
        }

        if (!mc_addr_set || !nb_mc_addr)
                return 0;

        /* Check for available space */
        if (nb_mc_addr >
            ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
                plt_err("No space is available to add multicast filters");
                return -ENOSPC;
        }

        /* Multicast addresses are to be installed */
        for (i = 0; i < nb_mc_addr; i++) {
                index = roc_nix_mac_addr_add(nix, mc_addr_set[i].addr_bytes);
                if (index < 0) {
                        plt_err("Failed to add mcast mac address, rc=%d",
                                index);
                        return index;
                }

                dev->dmac_filter_count++;
                /* Update address in NIC data structure */
                rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[index]);
        }

        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;

        return 0;
}
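
/* Per-queue PFC plumbing: Tx pause is realized as CQ backpressure on
 * the Rx queue/TC pair, Rx pause as TM backpressure on the Tx queue/TC
 * pair; the MAC is then programmed with the merged class-enable mask.
 */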
static int
nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
                                 struct cnxk_pfc_cfg *conf)
{
        enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
                                           ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_pfc_cfg pfc_cfg;
        struct roc_nix_fc_cfg fc_cfg;
        struct cnxk_eth_rxq_sp *rxq;
        struct cnxk_eth_txq_sp *txq;
        uint8_t rx_pause, tx_pause;
        enum rte_eth_fc_mode mode;
        struct roc_nix_cq *cq;
        struct roc_nix_sq *sq;
        int rc;

        if (roc_nix_is_vf_or_sdp(nix)) {
                plt_err("Prio flow ctrl config is not allowed on VF and SDP");
                return -ENOTSUP;
        }

        if (roc_model_is_cn96_ax() && data->dev_started) {
                /* On Ax, CQ should be in disabled state
                 * while setting flow control configuration.
                 */
                plt_info("Stop the port=%d for setting flow control",
                         data->port_id);
                return 0;
        }

        if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
            dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
                plt_err("Same TC can not be configured on multiple SQs");
                return -ENOTSUP;
        }

        mode = conf->fc_cfg.mode;
        rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
        tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);

        /* Configure CQs */
        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
        cq = &dev->cqs[rxq->qid];
        fc_cfg.type = ROC_NIX_FC_CQ_CFG;
        fc_cfg.cq_cfg.tc = conf->rx_tc;
        fc_cfg.cq_cfg.enable = !!tx_pause;
        fc_cfg.cq_cfg.rq = cq->qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
        rc = roc_nix_fc_config_set(nix, &fc_cfg);
        if (rc)
                goto exit;

        /* Check if RX pause frame is enabled or not */
        if (pfc->fc_cfg.rx_pause ^ rx_pause) {
                if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
                        return -ENOTSUP;

                if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
                    eth_dev->data->nb_tx_queues > 1) {
                        /*
                         * Disabled xmit will be enabled when
                         * new topology is available.
                         */
                        rc = roc_nix_tm_hierarchy_disable(nix);
                        if (rc)
                                goto exit;

                        rc = roc_nix_tm_pfc_prepare_tree(nix);
                        if (rc)
                                goto exit;

                        rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
                                                         true);
                        if (rc)
                                goto exit;
                }
        }

        txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
        sq = &dev->sqs[txq->qid];
        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        fc_cfg.type = ROC_NIX_FC_TM_CFG;
        fc_cfg.tm_cfg.sq = sq->qid;
        fc_cfg.tm_cfg.tc = conf->tx_tc;
        fc_cfg.tm_cfg.enable = !!rx_pause;
        rc = roc_nix_fc_config_set(nix, &fc_cfg);
        if (rc)
                goto exit;

        dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;

        /* Configure MAC block */
        if (tx_pause)
                pfc->class_en |= BIT(conf->rx_tc);
        else
                pfc->class_en &= ~BIT(conf->rx_tc);

        if (pfc->class_en)
                mode = RTE_ETH_FC_FULL;

        memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
        pfc_cfg.mode = mode_map[mode];
        pfc_cfg.tc = pfc->class_en;
        rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
        if (rc)
                goto exit;

        pfc->fc_cfg.rx_pause = rx_pause;
        pfc->fc_cfg.tx_pause = tx_pause;
        pfc->fc_cfg.mode = mode;

exit:
        return rc;
}