1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_malloc.h>
8 #include "otx2_ethdev.h"
/* Match-criteria flag bits passed to nix_vlan_mcam_config() to select
 * which fields (vlan id, vtag presence, MAC address, QinQ) an NPC MCAM
 * search entry matches on, plus action/bookkeeping modifiers.
 */
12 #define VLAN_ID_MATCH 0x1
13 #define VTAG_F_MATCH 0x2
14 #define MAC_ADDR_MATCH 0x4
15 #define QINQ_F_MATCH 0x8
16 #define VLAN_DROP 0x10
17 #define DEF_F_ENTRY 0x20
/* Enable or disable an already-allocated NPC MCAM entry via an AF mailbox
 * message.
 * NOTE(review): lines are missing from this view (return type, braces, the
 * if/else selecting between the ena/dis messages, req->entry assignment,
 * rc handling) -- confirm against the full file.
 */
25 __rte_unused nix_vlan_mcam_enb_dis(struct otx2_eth_dev *dev,
26 uint32_t entry, const int enable)
28 struct npc_mcam_ena_dis_entry_req *req;
29 struct otx2_mbox *mbox = dev->mbox;
/* Presumably one of these two allocations runs depending on 'enable' */
33 req = otx2_mbox_alloc_msg_npc_mcam_ena_entry(mbox);
35 req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox);
39 rc = otx2_mbox_process_msg(mbox, NULL);
/* Fill the RX action and vtag_action words of an MCAM entry.
 * action: UCAST (or RSS with the device's RSS algorithm index in bits
 * [63:56] when RSS mq_mode is configured) plus the pf/vf function in
 * bits [19:4]; DROP overrides the op when 'drop' is set.
 * vtag_action: VTAG0 strips the single/inner CTAG; when 'qinq' is set
 * VTAG1 (bits [47:32]) additionally covers the outer STAG.
 * NOTE(review): surrounding braces/else branches are missing from this
 * view -- the qinq vs. non-qinq split is inferred from the comments.
 */
44 nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
45 struct mcam_entry *entry, bool qinq, bool drop)
47 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
48 int pcifunc = otx2_pfvf_func(dev->pf, dev->vf);
49 uint64_t action = 0, vtag_action = 0;
51 action = NIX_RX_ACTIONOP_UCAST;
53 if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
54 action = NIX_RX_ACTIONOP_RSS;
/* RSS algorithm index lives in action bits [63:56] */
55 action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
/* Target pf/vf function in action bits [19:4] */
58 action |= (uint64_t)pcifunc << 4;
59 entry->action = action;
/* Drop: replace the 4-bit action opcode, keep the rest of the word */
62 entry->action &= ~((uint64_t)0xF);
63 entry->action |= NIX_RX_ACTIONOP_DROP;
68 /* VTAG0 fields denote CTAG in single vlan case */
69 vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
70 vtag_action |= (NPC_LID_LB << 8);
71 vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR;
73 /* VTAG0 & VTAG1 fields denote CTAG & STAG respectively */
74 vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
75 vtag_action |= (NPC_LID_LB << 8);
76 vtag_action |= NIX_RX_VTAGACTION_VTAG1_RELPTR;
/* Second vtag descriptor occupies bits [63:32] of vtag_action */
77 vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 47);
78 vtag_action |= ((uint64_t)(NPC_LID_LB) << 40);
79 vtag_action |= (NIX_RX_VTAGACTION_VTAG0_RELPTR << 32);
82 entry->vtag_action = vtag_action;
/* Free a previously allocated MCAM entry back to the AF via mbox.
 * NOTE(review): return type, braces, req->entry assignment and rc checks
 * are missing from this view.
 */
86 nix_vlan_mcam_free(struct otx2_eth_dev *dev, uint32_t entry)
88 struct npc_mcam_free_entry_req *req;
89 struct otx2_mbox *mbox = dev->mbox;
92 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
95 rc = otx2_mbox_process_msg(mbox, NULL);
/* Write (overwrite) the contents of MCAM entry 'ent_idx' with 'entry',
 * on interface 'intf' (RX/TX), leaving it enabled per 'ena'.
 * NOTE(review): req->intf assignment and rc handling are missing from
 * this view.
 */
100 nix_vlan_mcam_write(struct rte_eth_dev *eth_dev, uint16_t ent_idx,
101 struct mcam_entry *entry, uint8_t intf, uint8_t ena)
103 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
104 struct npc_mcam_write_entry_req *req;
105 struct otx2_mbox *mbox = dev->mbox;
109 req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
111 req->entry = ent_idx;
113 req->enable_entry = ena;
114 memcpy(&req->entry_data, entry, sizeof(struct mcam_entry));
116 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
/* Allocate a new MCAM entry and write 'entry' into it in one mbox call.
 * RX entries are ordered relative to existing default/qinq entries:
 * filter entries go at higher priority than the default-rx entry, drop
 * entries at lower priority than the qinq entry, otherwise any priority.
 * NOTE(review): return of the allocated index and error paths are missing
 * from this view.
 */
121 nix_vlan_mcam_alloc_and_write(struct rte_eth_dev *eth_dev,
122 struct mcam_entry *entry,
123 uint8_t intf, bool drop)
125 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
126 struct npc_mcam_alloc_and_write_entry_req *req;
127 struct npc_mcam_alloc_and_write_entry_rsp *rsp;
128 struct otx2_mbox *mbox = dev->mbox;
131 req = otx2_mbox_alloc_msg_npc_mcam_alloc_and_write_entry(mbox);
133 if (intf == NPC_MCAM_RX) {
/* Non-drop filter entries must match before the default entry */
134 if (!drop && dev->vlan_info.def_rx_mcam_idx) {
135 req->priority = NPC_MCAM_HIGHER_PRIO;
136 req->ref_entry = dev->vlan_info.def_rx_mcam_idx;
/* Drop entries sit below the qinq entry so QinQ still matches */
137 } else if (drop && dev->vlan_info.qinq_mcam_idx) {
138 req->priority = NPC_MCAM_LOWER_PRIO;
139 req->ref_entry = dev->vlan_info.qinq_mcam_idx;
141 req->priority = NPC_MCAM_ANY_PRIO;
145 req->priority = NPC_MCAM_ANY_PRIO;
150 req->enable_entry = 1;
151 memcpy(&req->entry_data, entry, sizeof(struct mcam_entry));
153 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
/* Read MCAM entry 'mcam_index', rewrite its LA (MAC DA) extraction field,
 * and write it back -- used when toggling promiscuous mode so vlan entries
 * either match the port MAC or match any MAC.
 * NOTE(review): the signature's trailing parameter (presumably 'enable')
 * and the enable/disable branch around the mask updates are missing from
 * this view -- confirm against the full file.
 */
161 nix_vlan_update_mac(struct rte_eth_dev *eth_dev, int mcam_index,
164 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
165 struct vlan_mkex_info *mkex = &dev->vlan_info.mkex;
166 volatile uint8_t *key_data, *key_mask;
167 struct npc_mcam_read_entry_req *req;
168 struct npc_mcam_read_entry_rsp *rsp;
169 struct otx2_mbox *mbox = dev->mbox;
170 uint64_t mcam_data, mcam_mask;
171 struct mcam_entry entry;
172 uint8_t intf, mcam_ena;
173 int idx, rc = -EINVAL;
176 memset(&entry, 0, sizeof(struct mcam_entry));
178 /* Read entry first */
179 req = otx2_mbox_alloc_msg_npc_mcam_read_entry(mbox);
181 req->entry = mcam_index;
183 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
185 otx2_err("Failed to read entry %d", mcam_index);
189 entry = rsp->entry_data;
191 mcam_ena = rsp->enable;
193 /* Update mcam address */
194 key_data = (volatile uint8_t *)entry.kw;
195 key_mask = (volatile uint8_t *)entry.kw_mask;
199 otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off,
200 &mcam_mask, mkex->la_xtract.len + 1);
/* Pack the 6-byte MAC big-endian into the low 48 bits of mcam_data */
204 mac_addr = dev->mac_addr;
205 for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--)
206 mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx);
/* Exact-match all 48 MAC bits */
208 mcam_mask = BIT_ULL(48) - 1;
210 otx2_mbox_memcpy(key_data + mkex->la_xtract.key_off,
211 &mcam_data, mkex->la_xtract.len + 1);
212 otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off,
213 &mcam_mask, mkex->la_xtract.len + 1);
216 /* Write back the mcam entry */
217 rc = nix_vlan_mcam_write(eth_dev, mcam_index,
218 &entry, intf, mcam_ena);
220 otx2_err("Failed to write entry %d", mcam_index);
/* Propagate a promiscuous-mode change to all vlan-related MCAM entries:
 * the default rx entry plus every entry in the vlan filter table have
 * their MAC-match field updated via nix_vlan_update_mac().
 */
226 otx2_nix_vlan_update_promisc(struct rte_eth_dev *eth_dev, int enable)
228 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
229 struct otx2_vlan_info *vlan = &dev->vlan_info;
230 struct vlan_entry *entry;
232 /* Already in required mode */
233 if (enable == vlan->promisc_on)
236 /* Update default rx entry */
237 if (vlan->def_rx_mcam_idx)
238 nix_vlan_update_mac(eth_dev, vlan->def_rx_mcam_idx, enable);
240 /* Update all other rx filter entries */
241 TAILQ_FOREACH(entry, &vlan->fltr_tbl, next)
242 nix_vlan_update_mac(eth_dev, entry->mcam_idx, enable);
244 vlan->promisc_on = enable;
247 /* Configure mcam entry with required MCAM search rules */
/* Builds a complete struct mcam_entry from 'flags' (combination of the
 * VLAN_ID_MATCH/VTAG_F_MATCH/MAC_ADDR_MATCH/QINQ_F_MATCH/VLAN_DROP/
 * DEF_F_ENTRY bits), sets the rx action, and allocates+writes it on the
 * RX interface. Returns the allocated entry index (via the alloc call).
 * NOTE(review): 'kwi', 'mac_addr', 'idx' declarations and the mcam_data
 * initialization in the MAC branch are on lines missing from this view.
 */
249 nix_vlan_mcam_config(struct rte_eth_dev *eth_dev,
250 uint16_t vlan_id, uint16_t flags)
252 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
253 struct vlan_mkex_info *mkex = &dev->vlan_info.mkex;
254 volatile uint8_t *key_data, *key_mask;
255 uint64_t mcam_data, mcam_mask;
256 struct mcam_entry entry;
260 memset(&entry, 0, sizeof(struct mcam_entry));
261 key_data = (volatile uint8_t *)entry.kw;
262 key_mask = (volatile uint8_t *)entry.kw_mask;
264 /* Channel base extracted to KW0[11:0] */
265 entry.kw[kwi] = dev->rx_chan_base;
266 entry.kw_mask[kwi] = BIT_ULL(12) - 1;
268 /* Adds vlan_id & LB CTAG flag to MCAM KW */
269 if (flags & VLAN_ID_MATCH) {
270 entry.kw[kwi] |= NPC_LT_LB_CTAG << mkex->lb_lt_offset;
271 entry.kw_mask[kwi] |= 0xFULL << mkex->lb_lt_offset;
/* vlan id sits past the 16-bit TPID within the LB extraction */
273 mcam_data = (vlan_id << 16);
274 mcam_mask = (BIT_ULL(16) - 1) << 16;
275 otx2_mbox_memcpy(key_data + mkex->lb_xtract.key_off,
276 &mcam_data, mkex->lb_xtract.len + 1);
277 otx2_mbox_memcpy(key_mask + mkex->lb_xtract.key_off,
278 &mcam_mask, mkex->lb_xtract.len + 1);
281 /* Adds LB STAG flag to MCAM KW */
282 if (flags & QINQ_F_MATCH) {
283 entry.kw[kwi] |= NPC_LT_LB_STAG << mkex->lb_lt_offset;
284 entry.kw_mask[kwi] |= 0xFULL << mkex->lb_lt_offset;
287 /* Adds LB CTAG & LB STAG flags to MCAM KW */
288 if (flags & VTAG_F_MATCH) {
289 entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG)
290 << mkex->lb_lt_offset;
/* NOTE(review): mask is the AND (common bits) of the two LTYPEs so a
 * packet with either tag type matches -- looks intentional, verify.
 */
291 entry.kw_mask[kwi] |= (NPC_LT_LB_CTAG & NPC_LT_LB_STAG)
292 << mkex->lb_lt_offset;
295 /* Adds port MAC address to MCAM KW */
296 if (flags & MAC_ADDR_MATCH) {
298 mac_addr = dev->mac_addr;
/* Pack the 6-byte MAC big-endian into the low 48 bits */
299 for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--)
300 mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx);
302 mcam_mask = BIT_ULL(48) - 1;
303 otx2_mbox_memcpy(key_data + mkex->la_xtract.key_off,
304 &mcam_data, mkex->la_xtract.len + 1);
305 otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off,
306 &mcam_mask, mkex->la_xtract.len + 1);
309 /* VLAN_DROP: for drop action for all vlan packets when filter is on.
310 * For QinQ, enable vtag action for both outer & inner tags
312 if (flags & VLAN_DROP)
313 nix_set_rx_vlan_action(eth_dev, &entry, false, true);
314 else if (flags & QINQ_F_MATCH)
315 nix_set_rx_vlan_action(eth_dev, &entry, true, false);
317 nix_set_rx_vlan_action(eth_dev, &entry, false, false);
/* Keep a copy of the default entry so later filter/strip toggles can
 * rewrite its action without re-reading the MCAM.
 */
319 if (flags & DEF_F_ENTRY)
320 dev->vlan_info.def_rx_mcam_ent = entry;
322 return nix_vlan_mcam_alloc_and_write(eth_dev, &entry, NIX_INTF_RX,
326 /* Installs/Removes/Modifies default rx entry */
/* State machine for the single default rx MCAM entry shared by strip and
 * filter: allocates it on first need (drop action when filtering, plain
 * vtag match when stripping), converts drop->forward on filter disable,
 * forward->drop on filter enable, and frees it when neither strip, filter
 * nor a later-allocated qinq entry still needs it.
 * NOTE(review): several closing braces, 'flags'/'mcam_idx'/'rc' decls and
 * some else-branches are on lines missing from this view.
 */
328 nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
329 bool filter, bool enable)
331 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
332 struct otx2_vlan_info *vlan = &dev->vlan_info;
336 /* Use default mcam entry to either drop vlan traffic when
337 * vlan filter is on or strip vtag when strip is enabled.
338 * Allocate default entry which matches port mac address
339 * and vtag(ctag/stag) flags with drop action.
341 if (!vlan->def_rx_mcam_idx) {
/* In promiscuous mode the entry must not be MAC-qualified */
342 if (!eth_dev->data->promiscuous)
343 flags = MAC_ADDR_MATCH;
345 if (filter && enable)
346 flags |= VTAG_F_MATCH | VLAN_DROP;
347 else if (strip && enable)
348 flags |= VTAG_F_MATCH;
352 flags |= DEF_F_ENTRY;
354 mcam_idx = nix_vlan_mcam_config(eth_dev, 0, flags);
356 otx2_err("Failed to config vlan mcam");
360 vlan->def_rx_mcam_idx = mcam_idx;
364 /* Filter is already enabled, so packets would be dropped anyways. No
365 * processing needed for enabling strip wrt mcam entry.
368 /* Filter disable request */
369 if (vlan->filter_on && filter && !enable) {
/* Clear the 4-bit action opcode; new op is chosen below */
370 vlan->def_rx_mcam_ent.action &= ~((uint64_t)0xF);
372 /* Free default rx entry only when
373 * 1. strip is not on and
374 * 2. qinq entry is allocated before default entry.
376 if (vlan->strip_on ||
377 (vlan->qinq_on && !vlan->qinq_before_def)) {
378 if (eth_dev->data->dev_conf.rxmode.mq_mode ==
380 vlan->def_rx_mcam_ent.action |=
383 vlan->def_rx_mcam_ent.action |=
384 NIX_RX_ACTIONOP_UCAST;
385 return nix_vlan_mcam_write(eth_dev,
386 vlan->def_rx_mcam_idx,
387 &vlan->def_rx_mcam_ent,
390 rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx);
393 vlan->def_rx_mcam_idx = 0;
397 /* Filter enable request */
398 if (!vlan->filter_on && filter && enable) {
399 vlan->def_rx_mcam_ent.action &= ~((uint64_t)0xF);
400 vlan->def_rx_mcam_ent.action |= NIX_RX_ACTIONOP_DROP;
401 return nix_vlan_mcam_write(eth_dev, vlan->def_rx_mcam_idx,
402 &vlan->def_rx_mcam_ent, NIX_INTF_RX, 1);
405 /* Strip disable request */
406 if (vlan->strip_on && strip && !enable) {
/* Free only if no other feature still references the entry */
407 if (!vlan->filter_on &&
408 !(vlan->qinq_on && !vlan->qinq_before_def)) {
409 rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx);
412 vlan->def_rx_mcam_idx = 0;
419 /* Configure vlan stripping on or off */
/* Updates the default rx MCAM entry for strip mode, then programs the
 * NIX RX vtag config (strip + capture, 4-byte vtag, vtag type index 0)
 * through the mbox, and records the new state in dev->vlan_info.
 * NOTE(review): the enable/disable branch around strip_vtag=1/0 and rc
 * checks are on lines missing from this view.
 */
421 nix_vlan_hw_strip(struct rte_eth_dev *eth_dev, const uint8_t enable)
423 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
424 struct otx2_mbox *mbox = dev->mbox;
425 struct nix_vtag_config *vtag_cfg;
428 rc = nix_vlan_handle_default_rx_entry(eth_dev, true, false, enable);
430 otx2_err("Failed to config default rx entry");
434 vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
435 /* cfg_type = 1 for rx vlan cfg */
436 vtag_cfg->cfg_type = VTAG_RX;
439 vtag_cfg->rx.strip_vtag = 1;
441 vtag_cfg->rx.strip_vtag = 0;
444 vtag_cfg->rx.capture_vtag = 1;
445 vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
446 /* Use rx vtag type index[0] for now */
447 vtag_cfg->rx.vtag_type = 0;
449 rc = otx2_mbox_process(mbox);
453 dev->vlan_info.strip_on = enable;
457 /* Configure vlan filtering on or off for all vlans if vlan_id == 0 */
/* vlan_id == 0 toggles global filtering by switching the default rx
 * entry between drop and forward; per-vlan-id handling is on lines not
 * visible here.
 */
459 nix_vlan_hw_filter(struct rte_eth_dev *eth_dev, const uint8_t enable,
462 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
465 if (!vlan_id && enable) {
466 rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true,
469 otx2_err("Failed to config vlan mcam");
472 dev->vlan_info.filter_on = enable;
476 if (!vlan_id && !enable) {
477 rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true,
480 otx2_err("Failed to config vlan mcam");
483 dev->vlan_info.filter_on = enable;
490 /* Configure double vlan(qinq) on or off */
/* Disable path: free the qinq MCAM entry and clear the qinq state flags.
 * Enable path: install a QINQ_F_MATCH entry (MAC-qualified unless the
 * port is promiscuous) and remember whether it was allocated before the
 * default rx entry (ordering matters when freeing the default entry).
 * NOTE(review): the enable/disable branch structure and rc checks are on
 * lines missing from this view.
 */
492 otx2_nix_config_double_vlan(struct rte_eth_dev *eth_dev,
493 const uint8_t enable)
495 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
496 struct otx2_vlan_info *vlan_info;
500 vlan_info = &dev->vlan_info;
503 if (!vlan_info->qinq_mcam_idx)
506 rc = nix_vlan_mcam_free(dev, vlan_info->qinq_mcam_idx);
510 vlan_info->qinq_mcam_idx = 0;
511 dev->vlan_info.qinq_on = 0;
512 vlan_info->qinq_before_def = 0;
516 if (eth_dev->data->promiscuous)
517 mcam_idx = nix_vlan_mcam_config(eth_dev, 0, QINQ_F_MATCH);
519 mcam_idx = nix_vlan_mcam_config(eth_dev, 0,
520 QINQ_F_MATCH | MAC_ADDR_MATCH);
524 if (!vlan_info->def_rx_mcam_idx)
525 vlan_info->qinq_before_def = 1;
527 vlan_info->qinq_mcam_idx = mcam_idx;
528 dev->vlan_info.qinq_on = 1;
/* rte_eth_dev vlan_offload_set callback: applies STRIP/FILTER/QINQ mask
 * bits from rxmode->offloads, rejects EXTEND, and mirrors the result into
 * dev->rx_offloads / rx_offload_flags for the fast path.
 * NOTE(review): the token "ð_dev" below is mojibake -- the HTML
 * entity "&eth;" swallowed the original "&eth_dev"; the line should read
 * "rxmode = &eth_dev->data->dev_conf.rxmode;". Fix when editing the real
 * file.
 */
533 otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
535 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
536 uint64_t offloads = dev->rx_offloads;
537 struct rte_eth_rxmode *rxmode;
540 rxmode = ð_dev->data->dev_conf.rxmode;
542 if (mask & ETH_VLAN_EXTEND_MASK) {
543 otx2_err("Extend offload not supported");
547 if (mask & ETH_VLAN_STRIP_MASK) {
548 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
549 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
550 rc = nix_vlan_hw_strip(eth_dev, true);
552 offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
553 rc = nix_vlan_hw_strip(eth_dev, false);
559 if (mask & ETH_VLAN_FILTER_MASK) {
560 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
561 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
562 rc = nix_vlan_hw_filter(eth_dev, true, 0);
564 offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
565 rc = nix_vlan_hw_filter(eth_dev, false, 0);
571 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
/* Only toggle double-vlan when the state actually changes */
572 if (!dev->vlan_info.qinq_on) {
573 offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
574 rc = otx2_nix_config_double_vlan(eth_dev, true);
579 if (dev->vlan_info.qinq_on) {
580 offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
581 rc = otx2_nix_config_double_vlan(eth_dev, false);
/* Any strip offload requires the VLAN_STRIP fast-path flag */
587 if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
588 DEV_RX_OFFLOAD_QINQ_STRIP)) {
589 dev->rx_offloads |= offloads;
590 dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
/* Count the nibbles enabled in 'mask' and convert to a bit offset
 * (each enabled nibble contributes 4 key bits).
 * NOTE(review): the loop over the mask bits is on lines missing from
 * this view.
 */
598 nix_vlan_rx_mkex_offset(uint64_t mask)
603 nib_count += mask & 1;
607 return nib_count * 4;
/* Validate the loaded NPC mkex profile for vlan use (channel nibbles and
 * LB LTYPE nibble must be enabled in the RX key mask), compute the LB
 * ltype offset within the key, and cache the LA (ether/MAC) and LB (CTAG)
 * extraction descriptors into dev->vlan_info.mkex.
 * NOTE(review): declarations of 'rx_keyx', 'p', error returns, and the
 * lb_lt_offset assignment target are on lines missing from this view.
 */
613 struct vlan_mkex_info *mkex = &dev->vlan_info.mkex;
614 struct otx2_npc_flow_info *npc = &dev->npc_flow;
615 struct npc_xtract_info *x_info = NULL;
621 otx2_err("Missing npc mkex configuration");
625 #define NPC_KEX_CHAN_NIBBLE_ENA 0x7ULL
626 #define NPC_KEX_LB_LTYPE_NIBBLE_ENA 0x1000ULL
627 #define NPC_KEX_LB_LTYPE_NIBBLE_MASK 0xFFFULL
629 rx_keyx = npc->keyx_supp_nmask[NPC_MCAM_RX];
630 if ((rx_keyx & NPC_KEX_CHAN_NIBBLE_ENA) != NPC_KEX_CHAN_NIBBLE_ENA)
633 if ((rx_keyx & NPC_KEX_LB_LTYPE_NIBBLE_ENA) !=
634 NPC_KEX_LB_LTYPE_NIBBLE_ENA)
638 nix_vlan_rx_mkex_offset(rx_keyx & NPC_KEX_LB_LTYPE_NIBBLE_MASK);
641 x_info = &(*p)[NPC_MCAM_RX][NPC_LID_LA][NPC_LT_LA_ETHER].xtract[0];
642 memcpy(&mkex->la_xtract, x_info, sizeof(struct npc_xtract_info));
643 x_info = &(*p)[NPC_MCAM_RX][NPC_LID_LB][NPC_LT_LB_CTAG].xtract[0];
644 memcpy(&mkex->lb_xtract, x_info, sizeof(struct npc_xtract_info));
/* One-time (per configure) vlan init: fetch mkex extraction info, init
 * the filter table, then apply the configured strip/filter offloads via
 * otx2_nix_vlan_offload_set().
 * NOTE(review): 'mask'/'rc' declarations, braces and return statements
 * are on lines missing from this view. The function name line 313 above
 * is nix_vlan_get_mkex_info's -- its 'dev' parameter line is absent too.
 */
652 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
655 /* Port initialized for first time or restarted */
656 if (!dev->configured) {
657 rc = nix_vlan_get_mkex_info(dev);
659 otx2_err("Failed to get vlan mkex info rc=%d", rc);
663 TAILQ_INIT(&dev->vlan_info.fltr_tbl);
667 ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
668 rc = otx2_nix_vlan_offload_set(eth_dev, mask);
670 otx2_err("Failed to set vlan offload rc=%d", rc);
678 otx2_nix_vlan_fini(struct rte_eth_dev *eth_dev)
680 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
681 struct otx2_vlan_info *vlan = &dev->vlan_info;
684 if (!dev->configured) {
685 if (vlan->def_rx_mcam_idx) {
686 rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx);
692 otx2_nix_config_double_vlan(eth_dev, false);
693 vlan->def_rx_mcam_idx = 0;