/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_mbuf_pool_ops.h>

#include "otx2_ethdev.h"

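/* Enable/disable promiscuous mode at the CGX (MAC) level through an AF
 * mbox message. VFs have no CGX mapping, so this is skipped for them.
 */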
static void
nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf(dev))
		return;

	if (en)
		otx2_mbox_alloc_msg_cgx_promisc_enable(mbox);
	else
		otx2_mbox_alloc_msg_cgx_promisc_disable(mbox);

	otx2_mbox_process(mbox);
}

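/* Set the NIX RX mode. When enabling, request unicast plus promiscuous;
 * when disabling, the request mode is left at its zero-initialized
 * default, which reverts the port to the plain unicast filter.
 */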
void
otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_rx_mode *req;

	if (otx2_dev_is_vf(dev))
		return;

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

	if (en)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

	otx2_mbox_process(mbox);
	eth_dev->data->promiscuous = en;
	otx2_nix_vlan_update_promisc(eth_dev, en);
}

void
otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
	otx2_nix_promisc_config(eth_dev, 1);
	nix_cgx_promisc_config(eth_dev, 1);
}

void
otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
	otx2_nix_promisc_config(eth_dev, 0);
	nix_cgx_promisc_config(eth_dev, 0);
}

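/* Set the NIX RX mode for allmulticast. When disabling, keep PROMISC
 * set if the port is still promiscuous so the two controls do not
 * clobber each other.
 */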
static void
nix_allmulticast_config(struct rte_eth_dev *eth_dev, int en)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_rx_mode *req;

	if (otx2_dev_is_vf(dev))
		return;

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

	if (en)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_ALLMULTI;
	else if (eth_dev->data->promiscuous)
		req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

	otx2_mbox_process(mbox);
}

void
otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	nix_allmulticast_config(eth_dev, 1);
}

void
otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	nix_allmulticast_config(eth_dev, 0);
}

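/* Report RX queue configuration back to the application. This PMD does
 * not use the threshold knobs, so they are reported as zero.
 */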
void
otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct otx2_eth_rxq *rxq;

	rxq = eth_dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->pool;
	qinfo->scattered_rx = eth_dev->data->scattered_rx;
	qinfo->nb_desc = rxq->qconf.nb_desc;

	qinfo->conf.rx_free_thresh = 0;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
	qinfo->conf.offloads = rxq->offloads;
}

void
otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct otx2_eth_txq *txq;

	txq = eth_dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->qconf.nb_desc;

	qinfo->conf.tx_thresh.pthresh = 0;
	qinfo->conf.tx_thresh.hthresh = 0;
	qinfo->conf.tx_thresh.wthresh = 0;

	qinfo->conf.tx_free_thresh = 0;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = 0;
}

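/* Fetch the CQ head and tail indices with a single atomic add to the
 * NIX_LF_CQ_OP_STATUS op register: the queue index is encoded in bits
 * 63:32 of the operand and HW returns the status word, with tail in
 * bits 19:0 and head in bits 39:20. On OP_ERR/CQ_ERR, report both as
 * zero.
 */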
static void
nix_rx_head_tail_get(struct otx2_eth_dev *dev,
		     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
{
	uint64_t reg, val;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)queue_idx) << 32);
	val = otx2_atomic64_add_nosync(reg, (int64_t *)
				       (dev->base + NIX_LF_CQ_OP_STATUS));
	if (val & (OP_ERR | CQ_ERR))
		val = 0;

	*tail = (uint32_t)(val & 0xFFFFF);
	*head = (uint32_t)((val >> 20) & 0xFFFFF);
}

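/* Number of used descriptors in the RX CQ: the head-to-tail distance,
 * taken modulo the ring size to handle wrap-around.
 */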
uint32_t
otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
{
	struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint32_t head, tail;

	nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
	return (tail - head) % rxq->qlen;
}

static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
{
	/* Check whether the given offset (CQ ring index) holds a packet
	 * filled by HW, i.e. lies within the head..tail window.
	 */
	if (tail > head && offset <= tail && offset >= head)
		return 1;

	/* Wrap around case */
	if (head > tail && (offset >= head || offset <= tail))
		return 1;

	return 0;
}

int
otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	uint32_t head, tail;

	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
			     &head, &tail, rxq->rq);

	return nix_offset_has_packet(head, tail, offset);
}

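/* rte_eth_rx_descriptor_status() callback: an offset beyond the ring is
 * invalid; a descriptor within the head..tail window holds a completed
 * packet, anything else is still available to HW.
 */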
int
otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	uint32_t head, tail;

	if (rxq->qlen <= offset)
		return -EINVAL;

	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
			     &head, &tail, rxq->rq);

	if (nix_offset_has_packet(head, tail, offset))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

/* It is a NOP for octeontx2 as HW frees the buffer on xmit */
int
otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	RTE_SET_USED(txq);
	RTE_SET_USED(free_cnt);

	return 0;
}

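/* Only the platform mempool ops (the octeontx2 NPA pool on this SoC)
 * are natively supported; report anything else as unsupported.
 */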
int
otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
	RTE_SET_USED(eth_dev);

	if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
		return 0;

	return -ENOTSUP;
}

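/* Generic filter control: only RTE_ETH_FILTER_GENERIC with
 * RTE_ETH_FILTER_GET is supported, handing back the rte_flow ops for
 * this PMD.
 */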
int
otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op, void *arg)
{
	RTE_SET_USED(eth_dev);

	if (filter_type != RTE_ETH_FILTER_GENERIC) {
		otx2_err("Unsupported filter type %d", filter_type);
		return -ENOTSUP;
	}

	if (filter_op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &otx2_flow_ops;
		return 0;
	}

	otx2_err("Invalid filter_op %d", filter_op);
	return -EINVAL;
}

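/* Query the auxiliary link info (including the SFP/QSFP EEPROM contents
 * cached by firmware) from the AF. Returns NULL when the mbox exchange
 * fails.
 */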
static struct cgx_fw_data *
nix_get_fwdata(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct cgx_fw_data *rsp = NULL;

	otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);

	otx2_mbox_process_msg(mbox, (void *)&rsp);

	return rsp;
}

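/* Report the plugged module type (SFF identifier) and its EEPROM size
 * from the firmware-provided data.
 */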
int
otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
			 struct rte_eth_dev_module_info *modinfo)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_fw_data *rsp;

	rsp = nix_get_fwdata(dev);
	if (rsp == NULL)
		return -EIO;

	modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
	modinfo->eeprom_len = SFP_EEPROM_SIZE;

	return 0;
}

int
otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			   struct rte_dev_eeprom_info *info)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_fw_data *rsp;

	if (!info->data || !info->length ||
	    (info->offset + info->length > SFP_EEPROM_SIZE))
		return -EINVAL;

	rsp = nix_get_fwdata(dev);
	if (rsp == NULL)
		return -EIO;

	otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset,
			 info->length);

	return 0;
}

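/* Populate the rte_eth_dev_info limits and capabilities for this port. */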
void
otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	devinfo->min_rx_bufsize = NIX_MIN_FRS;
	devinfo->max_rx_pktlen = NIX_MAX_FRS;
	devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_mac_addrs = dev->max_mac_entries;
	devinfo->max_vfs = pci_dev->max_vfs;
	devinfo->max_mtu = devinfo->max_rx_pktlen - NIX_L2_OVERHEAD;
	devinfo->min_mtu = devinfo->min_rx_bufsize - NIX_L2_OVERHEAD;

	devinfo->rx_offload_capa = dev->rx_offload_capa;
	devinfo->tx_offload_capa = dev->tx_offload_capa;
	devinfo->rx_queue_offload_capa = 0;
	devinfo->tx_queue_offload_capa = 0;

	devinfo->reta_size = dev->rss_info.rss_size;
	devinfo->hash_key_size = NIX_HASH_KEY_SIZE;
	devinfo->flow_type_rss_offloads = NIX_RSS_OFFLOAD;

	devinfo->default_rxconf = (struct rte_eth_rxconf) {
		.rx_drop_en = 0,
		.offloads = 0,
	};

	devinfo->default_txconf = (struct rte_eth_txconf) {
		.offloads = 0,
	};

	devinfo->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = UINT16_MAX,
		.nb_min = NIX_RX_MIN_DESC,
		.nb_align = NIX_RX_MIN_DESC_ALIGN,
		.nb_seg_max = NIX_RX_NB_SEG_MAX,
		.nb_mtu_seg_max = NIX_RX_NB_SEG_MAX,
	};
	devinfo->rx_desc_lim.nb_max =
		RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
				    NIX_RX_MIN_DESC_ALIGN);

	devinfo->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = UINT16_MAX,
		.nb_min = 1,
		.nb_align = 1,
		.nb_seg_max = NIX_TX_NB_SEG_MAX,
		.nb_mtu_seg_max = NIX_TX_NB_SEG_MAX,
	};

	/* Auto negotiation disabled */
	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
	devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;

	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
}