/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_ethdev.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_ethdev.h"

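/* Change the device MTU. The frame size programmed to hardware accounts
 * for L2 overhead and, when PTP is enabled, the timesync offset that the
 * hardware prepends to received frames. Two mailbox requests are issued:
 * the first also updates the SMQ (Tx) side, the second only Rx MAXLEN.
 */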
int
otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	uint32_t buffsz, frame_size = mtu + NIX_L2_OVERHEAD;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_frs_cfg *req;
	int rc;

	frame_size += NIX_TIMESYNC_RX_OFFSET * otx2_ethdev_is_ptp_en(dev);

	/* Check if MTU is within the allowed range */
	if (frame_size < NIX_MIN_FRS || frame_size > NIX_MAX_FRS)
		return -EINVAL;

	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/* Refuse MTU that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (data->dev_started && frame_size > buffsz &&
	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
		return -EINVAL;

	/* Check <seg size> * <max_seg> >= max_frame */
	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
		return -EINVAL;

	req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
	req->update_smq = true;
	if (otx2_dev_is_sdp(dev))
		req->sdp_link = true;
	/* FRS HW config should exclude FCS but include NPC VTAG insert size */
	req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Now just update Rx MAXLEN */
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
	req->maxlen = frame_size - RTE_ETHER_CRC_LEN;
	if (otx2_dev_is_sdp(dev))
		req->sdp_link = true;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* Update max_rx_pkt_len */
	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return rc;
}

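/* Derive the MTU from the configured max_rx_pkt_len and the data room of
 * the Rx mempool, turning on Rx scatter when a single buffer cannot hold
 * a full frame. Assumes Rx queue 0 has already been set up, since its
 * mempool is used as the reference.
 */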
int
otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct otx2_eth_rxq *rxq;
	uint32_t buffsz;
	uint16_t mtu;
	int rc;

	/* Get rx buffer size */
	rxq = data->rx_queues[0];
	mbp_priv = rte_mempool_get_priv(rxq->pool);
	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	/* Setup scatter mode if needed by jumbo */
	if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz)
		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;

	/* Setup MTU based on max_rx_pkt_len */
	mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;

	rc = otx2_nix_mtu_set(eth_dev, mtu);
	if (rc)
		otx2_err("Failed to set default MTU size %d", rc);

	return rc;
}

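/* Toggle promiscuous mode at the CGX (MAC) level. VFs and SDP interfaces
 * are skipped here as they have no CGX link of their own; only the NIX
 * level setting applies to them.
 */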
static void
nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf_or_sdp(dev))
		return;

	if (en)
		otx2_mbox_alloc_msg_cgx_promisc_enable(mbox);
	else
		otx2_mbox_alloc_msg_cgx_promisc_disable(mbox);

	otx2_mbox_process(mbox);
}

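/* Toggle promiscuous mode at the NIX Rx level. On disable, req->mode is
 * left at the freshly allocated mailbox message default, which restores
 * unicast filtering.
 */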
void
otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_rx_mode *req;

	if (otx2_dev_is_vf(dev))
		return;

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

	if (en)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

	otx2_mbox_process(mbox);
	eth_dev->data->promiscuous = en;
	otx2_nix_vlan_update_promisc(eth_dev, en);
}

int
otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
	otx2_nix_promisc_config(eth_dev, 1);
	nix_cgx_promisc_config(eth_dev, 1);

	return 0;
}

int
otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	otx2_nix_promisc_config(eth_dev, dev->dmac_filter_enable);
	nix_cgx_promisc_config(eth_dev, 0);
	dev->dmac_filter_enable = false;

	return 0;
}

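/* Toggle all-multicast at the NIX Rx level. When turning it off while
 * promiscuous mode is still enabled, promiscuous mode is re-applied so
 * the two settings do not clobber each other.
 */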
static void
nix_allmulticast_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_rx_mode *req;

	if (otx2_dev_is_vf(dev))
		return;

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

	if (en)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_ALLMULTI;
	else if (eth_dev->data->promiscuous)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

	otx2_mbox_process(mbox);
}

int
otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	nix_allmulticast_config(eth_dev, 1);

	return 0;
}

int
otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	nix_allmulticast_config(eth_dev, 0);

	return 0;
}

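/* Queue introspection callbacks: report the settings captured at queue
 * setup time. Threshold fields are reported as zero since this PMD does
 * not use them.
 */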
void
otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct otx2_eth_rxq *rxq;

	rxq = eth_dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->pool;
	qinfo->scattered_rx = eth_dev->data->scattered_rx;
	qinfo->nb_desc = rxq->qconf.nb_desc;

	qinfo->conf.rx_free_thresh = 0;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
	qinfo->conf.offloads = rxq->offloads;
}

void
otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct otx2_eth_txq *txq;

	txq = eth_dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->qconf.nb_desc;

	qinfo->conf.tx_thresh.pthresh = 0;
	qinfo->conf.tx_thresh.hthresh = 0;
	qinfo->conf.tx_thresh.wthresh = 0;

	qinfo->conf.tx_free_thresh = 0;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = 0;
}

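/* Burst mode reporting: render the active datapath flavour (scalar vs.
 * vector Neon) plus the enabled Rx/Tx offload flags as a human-readable
 * string for rte_eth_rx/tx_burst_mode_get().
 */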
int
otx2_rx_burst_mode_get(struct rte_eth_dev *eth_dev,
		       __rte_unused uint16_t queue_id,
		       struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint16_t flags;
		const char *output;
	} rx_offload_map[] = {
			{NIX_RX_OFFLOAD_RSS_F, "RSS,"},
			{NIX_RX_OFFLOAD_PTYPE_F, " Ptype,"},
			{NIX_RX_OFFLOAD_CHECKSUM_F, " Checksum,"},
			{NIX_RX_OFFLOAD_VLAN_STRIP_F, " VLAN Strip,"},
			{NIX_RX_OFFLOAD_MARK_UPDATE_F, " Mark Update,"},
			{NIX_RX_OFFLOAD_TSTAMP_F, " Timestamp,"},
			{NIX_RX_MULTI_SEG_F, " Scattered,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
						 "Scalar, Rx Offloads:"
	};
	uint32_t i;

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;
	bytes += rc;

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (dev->rx_offload_flags & rx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 rx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;
			bytes += rc;
		}
	}
done:
	return 0;
}

int
otx2_tx_burst_mode_get(struct rte_eth_dev *eth_dev,
		       __rte_unused uint16_t queue_id,
		       struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint16_t flags;
		const char *output;
	} tx_offload_map[] = {
			{NIX_TX_OFFLOAD_L3_L4_CSUM_F, " Inner L3/L4 csum,"},
			{NIX_TX_OFFLOAD_OL3_OL4_CSUM_F, " Outer L3/L4 csum,"},
			{NIX_TX_OFFLOAD_VLAN_QINQ_F, " VLAN Insertion,"},
			{NIX_TX_OFFLOAD_MBUF_NOFF_F, " MBUF free disable,"},
			{NIX_TX_OFFLOAD_TSTAMP_F, " Timestamp,"},
			{NIX_TX_OFFLOAD_TSO_F, " TSO,"},
			{NIX_TX_MULTI_SEG_F, " Scattered,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
						 "Scalar, Tx Offloads:"
	};
	uint32_t i;

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;
	bytes += rc;

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (dev->tx_offload_flags & tx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 tx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;
			bytes += rc;
		}
	}
done:
	return 0;
}

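/* Read CQ head/tail via the LF atomic op window: writing the queue index
 * (in bits 63:32) to NIX_LF_CQ_OP_STATUS returns the status word. On an
 * error indication the queue is reported as empty.
 */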
static void
nix_rx_head_tail_get(struct otx2_eth_dev *dev,
		     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
{
	uint64_t reg, val;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)queue_idx) << 32);
	val = otx2_atomic64_add_nosync(reg, (int64_t *)
				       (dev->base + NIX_LF_CQ_OP_STATUS));
	if (val & (OP_ERR | CQ_ERR))
		val = 0;

	*tail = (uint32_t)(val & 0xFFFFF);
	*head = (uint32_t)((val >> 20) & 0xFFFFF);
}

uint32_t
otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
{
	struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint32_t head, tail;

	nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
	return (tail - head) % rxq->qlen;
}

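/* Helper for descriptor status checks. For example, with head = 10 and
 * tail = 50, offsets 10..50 hold packets; in the wrap-around case
 * (head = 50, tail = 10), offsets >= 50 or <= 10 do.
 */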
static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
{
	/* Check given offset(queue index) has packet filled by HW */
	if (tail > head && offset <= tail && offset >= head)
		return 1;
	/* Wrap around case */
	if (head > tail && (offset >= head || offset <= tail))
		return 1;

	return 0;
}

int
otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	uint32_t head, tail;

	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
			     &head, &tail, rxq->rq);

	return nix_offset_has_packet(head, tail, offset);
}

int
otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	uint32_t head, tail;

	if (rxq->qlen <= offset)
		return -EINVAL;

	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
			     &head, &tail, rxq->rq);

	if (nix_offset_has_packet(head, tail, offset))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

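/* Tx counterpart of the CQ read above, using NIX_LF_SQ_OP_STATUS. Note
 * the narrower fields: SQ head/tail occupy 6 bits each here.
 */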
static void
nix_tx_head_tail_get(struct otx2_eth_dev *dev,
		     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
{
	uint64_t reg, val;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)queue_idx) << 32);
	val = otx2_atomic64_add_nosync(reg, (int64_t *)
				       (dev->base + NIX_LF_SQ_OP_STATUS));
	if (val & OP_ERR)
		val = 0;

	*tail = (uint32_t)((val >> 28) & 0x3F);
	*head = (uint32_t)((val >> 20) & 0x3F);
}

int
otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct otx2_eth_txq *txq = tx_queue;
	uint32_t head, tail;

	if (txq->qconf.nb_desc <= offset)
		return -EINVAL;

	nix_tx_head_tail_get(txq->dev, &head, &tail, txq->sq);

	if (nix_offset_has_packet(head, tail, offset))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

/* It is a NOP for octeontx2 as HW frees the buffer on xmit */
int
otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	RTE_SET_USED(txq);
	RTE_SET_USED(free_cnt);

	return 0;
}

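/* There is no firmware version as such for the NIX LF; report the name
 * of the active NPC MKEX profile instead. Follows the fw_version_get()
 * convention of returning the needed size when the caller's buffer is
 * too small.
 */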
int
otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
			size_t fw_size)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc = (int)fw_size;

	if (fw_size > sizeof(dev->mkex_pfl_name))
		rc = sizeof(dev->mkex_pfl_name);

	rc = strlcpy(fw_version, (char *)dev->mkex_pfl_name, rc);

	rc += 1; /* Add the size of '\0' */
	if (fw_size < (uint32_t)rc)
		return rc;

	return 0;
}

int
otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
	RTE_SET_USED(eth_dev);

	if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
		return 0;

	return -ENOTSUP;
}

int
otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op, void *arg)
{
	RTE_SET_USED(eth_dev);

	if (filter_type != RTE_ETH_FILTER_GENERIC) {
		otx2_err("Unsupported filter type %d", filter_type);
		return -ENOTSUP;
	}

	if (filter_op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &otx2_flow_ops;
		return 0;
	}

	otx2_err("Invalid filter_op %d", filter_op);
	return -EINVAL;
}

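/* Fetch auxiliary link/SFP data from the CGX firmware over the mailbox;
 * returns NULL on failure.
 */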
static struct cgx_fw_data *
nix_get_fwdata(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct cgx_fw_data *rsp = NULL;
	int rc;

	otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		otx2_err("Failed to get fw data: %d", rc);
		return NULL;
	}

	return rsp;
}

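/* Module EEPROM callbacks: the SFP EEPROM contents come from the
 * firmware-provided snapshot in cgx_fw_data rather than a direct read of
 * the module.
 */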
int
otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
			 struct rte_eth_dev_module_info *modinfo)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_fw_data *rsp;

	rsp = nix_get_fwdata(dev);
	if (rsp == NULL)
		return -EIO;

	modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
	modinfo->eeprom_len = SFP_EEPROM_SIZE;

	return 0;
}

int
otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			   struct rte_dev_eeprom_info *info)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_fw_data *rsp;

	if (!info->data || !info->length ||
	    (info->offset + info->length > SFP_EEPROM_SIZE))
		return -EINVAL;

	rsp = nix_get_fwdata(dev);
	if (rsp == NULL)
		return -EIO;

	otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset,
			 info->length);

	return 0;
}

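/* dev_infos_get callback. Frame limits derive from NIX_MIN_FRS/NIX_MAX_FRS
 * and the queue/descriptor bounds of the NIX LF; speed capabilities are
 * reported as fixed since autonegotiation is not exposed.
 */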
int
otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	devinfo->min_rx_bufsize = NIX_MIN_FRS;
	devinfo->max_rx_pktlen = NIX_MAX_FRS;
	devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_mac_addrs = dev->max_mac_entries;
	devinfo->max_vfs = pci_dev->max_vfs;
	devinfo->max_mtu = devinfo->max_rx_pktlen - NIX_L2_OVERHEAD;
	devinfo->min_mtu = devinfo->min_rx_bufsize - NIX_L2_OVERHEAD;

	devinfo->rx_offload_capa = dev->rx_offload_capa;
	devinfo->tx_offload_capa = dev->tx_offload_capa;
	devinfo->rx_queue_offload_capa = 0;
	devinfo->tx_queue_offload_capa = 0;

	devinfo->reta_size = dev->rss_info.rss_size;
	devinfo->hash_key_size = NIX_HASH_KEY_SIZE;
	devinfo->flow_type_rss_offloads = NIX_RSS_OFFLOAD;

	devinfo->default_rxconf = (struct rte_eth_rxconf) {
		.rx_drop_en = 0,
		.offloads = 0,
	};

	devinfo->default_txconf = (struct rte_eth_txconf) {
		.offloads = 0,
	};

	devinfo->default_rxportconf = (struct rte_eth_dev_portconf) {
		.ring_size = NIX_RX_DEFAULT_RING_SZ,
	};

	devinfo->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = UINT16_MAX,
		.nb_min = NIX_RX_MIN_DESC,
		.nb_align = NIX_RX_MIN_DESC_ALIGN,
		.nb_seg_max = NIX_RX_NB_SEG_MAX,
		.nb_mtu_seg_max = NIX_RX_NB_SEG_MAX,
	};
	devinfo->rx_desc_lim.nb_max =
		RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
				    NIX_RX_MIN_DESC_ALIGN);

	devinfo->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = UINT16_MAX,
		.nb_min = 1,
		.nb_align = 1,
		.nb_seg_max = NIX_TX_NB_SEG_MAX,
		.nb_mtu_seg_max = NIX_TX_NB_SEG_MAX,
	};

	/* Auto negotiation disabled */
	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
	if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
		devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;

		/* 50G and 100G to be supported for board version C0
		 * and above.
		 */
		if (!otx2_dev_is_Ax(dev))
			devinfo->speed_capa |= ETH_LINK_SPEED_50G |
					       ETH_LINK_SPEED_100G;
	}

	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	return 0;
}