/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"

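/* Release up to 'entry_count' NPC MCAM entries backing the PMD's multicast
 * filter list and drop the corresponding list elements; the table-set flag
 * is cleared once the list is empty.
 */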
static int
nix_mc_addr_list_free(struct otx2_eth_dev *dev, uint32_t entry_count)
{
	struct npc_mcam_free_entry_req *req;
	struct otx2_mbox *mbox = dev->mbox;
	struct mcast_entry *entry, *next;
	int rc = 0;

	/* Walk with an explicit next pointer as entries are freed in place */
	entry = TAILQ_FIRST(&dev->mc_fltr_tbl);
	while (entry != NULL && entry_count != 0) {
		next = TAILQ_NEXT(entry, next);

		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
		req->entry = entry->mcam_index;
		rc = otx2_mbox_process_msg(mbox, NULL);
		if (rc < 0)
			break;

		TAILQ_REMOVE(&dev->mc_fltr_tbl, entry, next);
		rte_free(entry);
		entry_count--;
		entry = next;
	}

	if (TAILQ_EMPTY(&dev->mc_fltr_tbl))
		dev->mc_tbl_set = false;

	return rc;
}

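/* Program one MCAM RX entry per multicast MAC in the filter list: the key
 * matches the RX channel base in KW0[11:0] plus the 48-bit destination MAC
 * at the LA/Ether extract offset, and the action steers hits to this PF/VF
 * as unicast, or via RSS when mq_mode is ETH_MQ_RX_RSS.
 */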
static int
nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_npc_flow_info *npc = &dev->npc_flow;
	volatile uint8_t *key_data, *key_mask;
	struct npc_mcam_write_entry_req *req;
	struct otx2_mbox *mbox = dev->mbox;
	struct npc_xtract_info *x_info;
	uint64_t mcam_data, mcam_mask;
	struct mcast_entry *entry;
	otx2_dxcfg_t *ld_cfg;
	uint8_t *mac_addr;
	uint64_t action;
	int idx, rc = 0;

	ld_cfg = &npc->prx_dxcfg;
	/* Get ETH layer profile info for populating mcam entries */
	x_info = &(*ld_cfg)[NPC_MCAM_RX][NPC_LID_LA][NPC_LT_LA_ETHER].xtract[0];

	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) {
		req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
		if (req == NULL) {
			/* The mbox memory buffer can be full.
			 * Flush it and retry.
			 */
			otx2_mbox_msg_send(mbox, 0);
			rc = otx2_mbox_wait_for_rsp(mbox, 0);
			if (rc < 0)
				return rc;

			req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
			if (req == NULL)
				return -ENOMEM;
		}
		req->entry = entry->mcam_index;
		req->intf = NPC_MCAM_RX;
		req->enable_entry = 1;

		/* Channel base extracted to KW0[11:0] */
		req->entry_data.kw[0] = dev->rx_chan_base;
		req->entry_data.kw_mask[0] = RTE_LEN2MASK(12, uint64_t);

		/* Update mcam address */
		key_data = (volatile uint8_t *)req->entry_data.kw;
		key_mask = (volatile uint8_t *)req->entry_data.kw_mask;

		/* Pack the multicast DMAC in big-endian byte order */
		mcam_data = 0ull;
		mcam_mask = RTE_LEN2MASK(48, uint64_t);
		mac_addr = &entry->mcast_mac.addr_bytes[0];
		for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--)
			mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx);

		otx2_mbox_memcpy(key_data + x_info->key_off,
				 &mcam_data, x_info->len);
		otx2_mbox_memcpy(key_mask + x_info->key_off,
				 &mcam_mask, x_info->len);

		action = NIX_RX_ACTIONOP_UCAST;

		if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
			action = NIX_RX_ACTIONOP_RSS;
			action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
		}

		action |= ((uint64_t)otx2_pfvf_func(dev->pf, dev->vf)) << 4;
		req->entry_data.action = action;
	}

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);

	return rc;
}

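/* Allocate fresh MCAM entries for every element of the software multicast
 * list and reprogram them into hardware. Used to restore the filters when
 * a list was previously set (dev->mc_tbl_set).
 */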
int
otx2_nix_mc_addr_list_install(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct otx2_mbox *mbox = dev->mbox;
	uint32_t entry_count = 0, idx = 0;
	struct mcast_entry *entry;
	int rc = 0;

	/* Nothing to install if no multicast list has been set */
	if (!dev->mc_tbl_set)
		return 0;

	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
		entry_count++;

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
	req->priority = NPC_MCAM_ANY_PRIO;
	req->count = entry_count;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc || rsp->count < entry_count) {
		otx2_err("Failed to allocate required mcam entries");
		return rc ? rc : -ENOSPC;
	}

	/* Hand out the freshly allocated MCAM indices to the list */
	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
		entry->mcam_index = rsp->entry_list[idx++];

	rc = nix_hw_update_mc_addr_list(eth_dev);

	return rc;
}

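/* Free the MCAM entries backing the multicast list while keeping the
 * software list intact, so otx2_nix_mc_addr_list_install() can later
 * re-allocate and reprogram them.
 */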
int
otx2_nix_mc_addr_list_uninstall(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct npc_mcam_free_entry_req *req;
	struct otx2_mbox *mbox = dev->mbox;
	struct mcast_entry *entry;
	int rc = 0;

	if (!dev->mc_tbl_set)
		return 0;

	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
		if (req == NULL) {
			/* The mbox memory buffer can be full: flush and retry */
			otx2_mbox_msg_send(mbox, 0);
			rc = otx2_mbox_wait_for_rsp(mbox, 0);
			if (rc < 0)
				return rc;

			req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
			if (req == NULL)
				return -ENOMEM;
		}
		req->entry = entry->mcam_index;
	}

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);

	return rc;
}

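/* Copy the caller's multicast MAC array into the PMD's list elements and
 * disable the corresponding MCAM entries; nix_hw_update_mc_addr_list()
 * rewrites and re-enables them afterwards.
 */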
static int
nix_setup_mc_addr_list(struct otx2_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set)
{
	struct npc_mcam_ena_dis_entry_req *req;
	struct otx2_mbox *mbox = dev->mbox;
	struct mcast_entry *entry;
	uint32_t idx = 0;
	int rc = 0;

	/* Populate PMD's mcast list with given mcast mac addresses and
	 * disable all mcam entries pertaining to the mcast list.
	 */
	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) {
		rte_memcpy(&entry->mcast_mac, &mc_addr_set[idx++],
			   RTE_ETHER_ADDR_LEN);

		req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox);
		if (req == NULL) {
			/* The mbox memory buffer can be full: flush and retry */
			otx2_mbox_msg_send(mbox, 0);
			rc = otx2_mbox_wait_for_rsp(mbox, 0);
			if (rc < 0)
				return rc;

			req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox);
			if (req == NULL)
				return -ENOMEM;
		}
		req->entry = entry->mcam_index;
	}

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);

	return rc;
}

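/* set_mc_addr_list dev_ops handler: resize the software list to nb_mc_addr
 * entries (allocating or freeing MCAM entries as needed), copy in the new
 * MAC set and program the hardware filters. A NULL or empty list frees all
 * filters. Not supported on VFs.
 */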
int
otx2_nix_set_mc_addr_list(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct otx2_mbox *mbox = dev->mbox;
	uint32_t idx, priv_count = 0;
	struct mcast_entry *entry;
	int rc = 0;

	if (otx2_dev_is_vf(dev))
		return -ENOTSUP;

	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
		priv_count++;

	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		/* Free existing list if new list is null */
		nb_mc_addr = priv_count;
		goto exit;
	}

	for (idx = 0; idx < nb_mc_addr; idx++) {
		if (!rte_is_multicast_ether_addr(&mc_addr_set[idx]))
			return -EINVAL;
	}

	/* New list is bigger than the existing list,
	 * allocate mcam entries for the extra entries.
	 */
	if (nb_mc_addr > priv_count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
		req->priority = NPC_MCAM_ANY_PRIO;
		req->count = nb_mc_addr - priv_count;

		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc || (rsp->count + priv_count < nb_mc_addr)) {
			otx2_err("Failed to allocate required entries");
			nb_mc_addr = priv_count;
			if (rc == 0)
				rc = -ENOSPC;
			goto exit;
		}

		/* Append new mcam entries to the existing mc list */
		for (idx = 0; idx < rsp->count; idx++) {
			entry = rte_zmalloc("otx2_nix_mc_entry",
					    sizeof(struct mcast_entry), 0);
			if (entry == NULL) {
				otx2_err("Failed to allocate memory");
				nb_mc_addr = priv_count;
				rc = -ENOMEM;
				goto exit;
			}
			entry->mcam_index = rsp->entry_list[idx];
			TAILQ_INSERT_HEAD(&dev->mc_fltr_tbl, entry, next);
		}
	} else {
		/* Free the extra mcam entries if the new list is smaller
		 * than the existing list.
		 */
		nix_mc_addr_list_free(dev, priv_count - nb_mc_addr);
	}

	/* Now mc_fltr_tbl has the required number of mcam entries,
	 * Traverse through it and add new multicast filter table entries.
	 */
	rc = nix_setup_mc_addr_list(dev, mc_addr_set);
	if (rc < 0)
		goto exit;

	rc = nix_hw_update_mc_addr_list(eth_dev);
	if (rc < 0)
		goto exit;

	dev->mc_tbl_set = true;
	return 0;

exit:
	/* On failure, or when a null list was passed, release the entries */
	nix_mc_addr_list_free(dev, nb_mc_addr);
	return rc;
}

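/* Initialize the software multicast filter list. Multicast filtering is
 * not supported on VFs (see the otx2_dev_is_vf() checks), so VFs skip it.
 */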
int
otx2_nix_mc_filter_init(struct otx2_eth_dev *dev)
{
	if (otx2_dev_is_vf(dev))
		return 0;

	TAILQ_INIT(&dev->mc_fltr_tbl);
	return 0;
}

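/* Tear down the multicast filter list: free every MCAM entry and list
 * element still held by the PMD.
 */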
void
otx2_nix_mc_filter_fini(struct otx2_eth_dev *dev)
{
	struct mcast_entry *entry;
	uint32_t count = 0;

	if (otx2_dev_is_vf(dev))
		return;

	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
		count++;

	nix_mc_addr_list_free(dev, count);
}