/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_mbuf_pool_ops.h>

#include "otx2_ethdev.h"

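/*
 * Control-path note: the configuration requests below all follow the same
 * AF mailbox pattern -- allocate a request on dev->mbox with one of the
 * otx2_mbox_alloc_msg_*() helpers, fill in the request fields, then issue
 * it with otx2_mbox_process() (or otx2_mbox_process_msg() when a response
 * payload is needed) and check the return code.
 */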
int
otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	uint32_t buffsz, frame_size = mtu + NIX_L2_OVERHEAD;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_frs_cfg *req;
	int rc;

	/* Check if MTU is within the allowed range */
	if (frame_size < NIX_MIN_FRS || frame_size > NIX_MAX_FRS)
		return -EINVAL;

	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/* Refuse MTU that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (data->dev_started && frame_size > buffsz &&
	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
		return -EINVAL;

	/* Check <seg size> * <max_seg> >= max_frame */
	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
		return -EINVAL;

	req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
	req->update_smq = true;
	if (otx2_dev_is_sdp(dev))
		req->sdp_link = true;
	/* FRS HW config should exclude FCS but include NPC VTAG insert size */
	req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Now just update Rx MAXLEN */
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
	req->maxlen = frame_size - RTE_ETHER_CRC_LEN;
	if (otx2_dev_is_sdp(dev))
		req->sdp_link = true;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Keep the jumbo frame offload flag in sync with the new size */
	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* Update max_rx_pkt_len */
	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return rc;
}

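/*
 * Invoked on the device start path to rederive the MTU from the configured
 * max_rx_pkt_len and the buffer size of the first Rx queue's mempool,
 * turning on Rx scatter when a single buffer cannot hold a full frame.
 */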
int
otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct otx2_eth_rxq *rxq;
	uint32_t buffsz;
	uint16_t mtu;
	int rc;

	/* Get rx buffer size */
	rxq = data->rx_queues[0];
	mbp_priv = rte_mempool_get_priv(rxq->pool);
	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	/* Setup scatter mode if needed by jumbo */
	if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz)
		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;

	/* Setup MTU based on max_rx_pkt_len */
	mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;

	rc = otx2_nix_mtu_set(eth_dev, mtu);
	if (rc)
		otx2_err("Failed to set default MTU size %d", rc);

	return rc;
}

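/*
 * CGX-level promiscuous mode is programmed at the MAC. VF and SDP
 * devices do not own a CGX link, hence the early return below.
 */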
static void
nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf_or_sdp(dev))
		return;

	if (en)
		otx2_mbox_alloc_msg_cgx_promisc_enable(mbox);
	else
		otx2_mbox_alloc_msg_cgx_promisc_disable(mbox);

	otx2_mbox_process(mbox);
}

void
otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_rx_mode *req;

	if (otx2_dev_is_vf(dev))
		return;

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

	if (en)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

	otx2_mbox_process(mbox);
	eth_dev->data->promiscuous = en;
	otx2_nix_vlan_update_promisc(eth_dev, en);
}

int
otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
	otx2_nix_promisc_config(eth_dev, 1);
	nix_cgx_promisc_config(eth_dev, 1);

	return 0;
}

int
otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
	otx2_nix_promisc_config(eth_dev, 0);
	nix_cgx_promisc_config(eth_dev, 0);

	return 0;
}

static void
nix_allmulticast_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_rx_mode *req;

	if (otx2_dev_is_vf(dev))
		return;

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

	if (en)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_ALLMULTI;
	else if (eth_dev->data->promiscuous)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

	otx2_mbox_process(mbox);
}

int
otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	nix_allmulticast_config(eth_dev, 1);

	return 0;
}

int
otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	nix_allmulticast_config(eth_dev, 0);

	return 0;
}

void
otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct otx2_eth_rxq *rxq;

	rxq = eth_dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->pool;
	qinfo->scattered_rx = eth_dev->data->scattered_rx;
	qinfo->nb_desc = rxq->qconf.nb_desc;

	qinfo->conf.rx_free_thresh = 0;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
	qinfo->conf.offloads = rxq->offloads;
}

void
otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct otx2_eth_txq *txq;

	txq = eth_dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->qconf.nb_desc;

	qinfo->conf.tx_thresh.pthresh = 0;
	qinfo->conf.tx_thresh.hthresh = 0;
	qinfo->conf.tx_thresh.wthresh = 0;

	qinfo->conf.tx_free_thresh = 0;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = 0;
}

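/*
 * NIX_LF_CQ_OP_STATUS is a hardware "operation" register: the CQ index is
 * encoded in bits [63:32] of the value written by the atomic add, and the
 * returned 64-bit word carries the CQ status, from which the 20-bit tail
 * (bits [19:0]) and head (bits [39:20]) pointers are decoded below.
 */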
static void
nix_rx_head_tail_get(struct otx2_eth_dev *dev,
		     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
{
	uint64_t reg, val;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)queue_idx) << 32);
	val = otx2_atomic64_add_nosync(reg, (int64_t *)
				       (dev->base + NIX_LF_CQ_OP_STATUS));
	if (val & (OP_ERR | CQ_ERR))
		val = 0;

	*tail = (uint32_t)(val & 0xFFFFF);
	*head = (uint32_t)((val >> 20) & 0xFFFFF);
}

uint32_t
otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
{
	struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint32_t head, tail;

	nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
	return (tail - head) % rxq->qlen;
}

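/*
 * head/tail delimit the span of ring entries currently filled by hardware;
 * the two checks below cover the linear layout and the wrap-around layout
 * of that span.
 */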
static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
{
	/* Check given offset(queue index) has packet filled by HW */
	if (tail > head && offset <= tail && offset >= head)
		return 1;
	/* Wrap around case */
	if (head > tail && (offset >= head || offset <= tail))
		return 1;

	return 0;
}

int
otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	uint32_t head, tail;

	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
			     &head, &tail, rxq->rq);

	return nix_offset_has_packet(head, tail, offset);
}

int
otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	uint32_t head, tail;

	if (rxq->qlen <= offset)
		return -EINVAL;

	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
			     &head, &tail, rxq->rq);

	if (nix_offset_has_packet(head, tail, offset))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

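/*
 * Same operation-register scheme as the CQ variant above, but against
 * NIX_LF_SQ_OP_STATUS; note the SQ head/tail fields decoded below are
 * only 6 bits wide.
 */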
static void
nix_tx_head_tail_get(struct otx2_eth_dev *dev,
		     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
{
	uint64_t reg, val;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)queue_idx) << 32);
	val = otx2_atomic64_add_nosync(reg, (int64_t *)
				       (dev->base + NIX_LF_SQ_OP_STATUS));
	if (val & OP_ERR)
		val = 0;

	*tail = (uint32_t)((val >> 28) & 0x3F);
	*head = (uint32_t)((val >> 20) & 0x3F);
}

int
otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct otx2_eth_txq *txq = tx_queue;
	uint32_t head, tail;

	if (txq->qconf.nb_desc <= offset)
		return -EINVAL;

	nix_tx_head_tail_get(txq->dev, &head, &tail, txq->sq);

	if (nix_offset_has_packet(head, tail, offset))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

/* It is a NOP for octeontx2 as HW frees the buffer on xmit */
int
otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	RTE_SET_USED(txq);
	RTE_SET_USED(free_cnt);

	return 0;
}

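/*
 * There is no conventional firmware version string for this device;
 * report the name of the NPC MKEX profile in use instead.
 */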
int
otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
			size_t fw_size)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc = (int)fw_size;

	if (fw_size > sizeof(dev->mkex_pfl_name))
		rc = sizeof(dev->mkex_pfl_name);

	rc = strlcpy(fw_version, (char *)dev->mkex_pfl_name, rc);

	rc += 1; /* Add the size of '\0' */
	if (fw_size < (uint32_t)rc)
		return rc;

	return 0;
}

int
otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
	RTE_SET_USED(eth_dev);

	if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
		return 0;

	return -ENOTSUP;
}

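/*
 * Generic flow API entry point: only RTE_ETH_FILTER_GENERIC is supported,
 * and a GET simply hands back the driver's rte_flow ops table.
 */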
int
otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op, void *arg)
{
	RTE_SET_USED(eth_dev);

	if (filter_type != RTE_ETH_FILTER_GENERIC) {
		otx2_err("Unsupported filter type %d", filter_type);
		return -ENOTSUP;
	}

	if (filter_op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &otx2_flow_ops;
		return 0;
	}

	otx2_err("Invalid filter_op %d", filter_op);
	return -EINVAL;
}

static struct cgx_fw_data *
nix_get_fwdata(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct cgx_fw_data *rsp = NULL;
	int rc;

	otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		otx2_err("Failed to get fw data: %d", rc);
		return NULL;
	}

	return rsp;
}

int
otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
			 struct rte_eth_dev_module_info *modinfo)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_fw_data *rsp;

	rsp = nix_get_fwdata(dev);
	if (rsp == NULL)
		return -EIO;

	modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
	modinfo->eeprom_len = SFP_EEPROM_SIZE;

	return 0;
}

int
otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			   struct rte_dev_eeprom_info *info)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_fw_data *rsp;

	if (!info->data || !info->length ||
	    (info->offset + info->length > SFP_EEPROM_SIZE))
		return -EINVAL;

	rsp = nix_get_fwdata(dev);
	if (rsp == NULL)
		return -EIO;

	otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset,
			 info->length);

	return 0;
}

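/*
 * Report static device capabilities and limits. Most values are hard
 * NIX/NPC limits; the offload capability masks were derived earlier,
 * during device init.
 */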
void
otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	devinfo->min_rx_bufsize = NIX_MIN_FRS;
	devinfo->max_rx_pktlen = NIX_MAX_FRS;
	devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_mac_addrs = dev->max_mac_entries;
	devinfo->max_vfs = pci_dev->max_vfs;
	devinfo->max_mtu = devinfo->max_rx_pktlen - NIX_L2_OVERHEAD;
	devinfo->min_mtu = devinfo->min_rx_bufsize - NIX_L2_OVERHEAD;

	devinfo->rx_offload_capa = dev->rx_offload_capa;
	devinfo->tx_offload_capa = dev->tx_offload_capa;
	devinfo->rx_queue_offload_capa = 0;
	devinfo->tx_queue_offload_capa = 0;

	devinfo->reta_size = dev->rss_info.rss_size;
	devinfo->hash_key_size = NIX_HASH_KEY_SIZE;
	devinfo->flow_type_rss_offloads = NIX_RSS_OFFLOAD;

	devinfo->default_rxconf = (struct rte_eth_rxconf) {
		.rx_drop_en = 0,
		.offloads = 0,
	};

	devinfo->default_txconf = (struct rte_eth_txconf) {
		.offloads = 0,
	};

	devinfo->default_rxportconf = (struct rte_eth_dev_portconf) {
		.ring_size = NIX_RX_DEFAULT_RING_SZ,
	};

	devinfo->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = UINT16_MAX,
		.nb_min = NIX_RX_MIN_DESC,
		.nb_align = NIX_RX_MIN_DESC_ALIGN,
		.nb_seg_max = NIX_RX_NB_SEG_MAX,
		.nb_mtu_seg_max = NIX_RX_NB_SEG_MAX,
	};
	/* Keep nb_max a multiple of the required descriptor alignment */
	devinfo->rx_desc_lim.nb_max =
		RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
				    NIX_RX_MIN_DESC_ALIGN);
	devinfo->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = UINT16_MAX,
		.nb_min = 1,	/* reconstructed: no SQ-specific minimum/alignment */
		.nb_align = 1,
		.nb_seg_max = NIX_TX_NB_SEG_MAX,
		.nb_mtu_seg_max = NIX_TX_NB_SEG_MAX,
	};

	/* Auto negotiation disabled */
	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
	devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;

	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
}