*/
#include "otx2_ethdev.h"
+#include "otx2_ethdev_sec.h"
#include "otx2_flow.h"
+int
+otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
+{
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ struct otx2_mbox *mbox = hw->mbox;
+ struct otx2_mcam_ents_info *info;
+ struct rte_bitmap *bmap;
+ struct rte_flow *flow;
+ int entry_count = 0;
+ int rc, idx;
+
+ for (idx = 0; idx < npc->flow_max_priority; idx++) {
+ info = &npc->flow_entry_info[idx];
+ entry_count += info->live_ent;
+ }
+
+ if (entry_count == 0)
+ return 0;
+
+ /* Free all MCAM entries allocated */
+ rc = otx2_flow_mcam_free_all_entries(mbox);
+
+ /* Free any MCAM counters and delete flow list */
+ for (idx = 0; idx < npc->flow_max_priority; idx++) {
+ while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
+ if (flow->ctr_id != NPC_COUNTER_NONE)
+ rc |= otx2_flow_mcam_free_counter(mbox,
+ flow->ctr_id);
+
+ TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
+ rte_free(flow);
+ bmap = npc->live_entries[flow->priority];
+ rte_bitmap_clear(bmap, flow->mcam_id);
+ }
+ info = &npc->flow_entry_info[idx];
+ info->free_ent = 0;
+ info->live_ent = 0;
+ }
+ return rc;
+}
+
+
static int
flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
struct otx2_npc_flow_info *flow_info)
return 0;
}
+static int
+flow_free_rss_action(struct rte_eth_dev *eth_dev,
+ struct rte_flow *flow)
+{
+ struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+ struct otx2_npc_flow_info *npc = &dev->npc_flow;
+ uint32_t rss_grp;
+
+ if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
+ rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
+ NIX_RSS_ACT_GRP_MASK;
+ if (rss_grp == 0 || rss_grp >= npc->rss_grps)
+ return -EINVAL;
+
+ rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
+ }
+
+ return 0;
+}
+
+static int
+flow_update_sec_tt(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_action actions[])
+{
+ int rc = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+ rc = otx2_eth_sec_update_tag_type(eth_dev);
+ break;
+ }
+ }
+
+ return rc;
+}
+
static int
flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
{
{
flow_parse_stage_func_t parse_stage_funcs[] = {
flow_parse_meta_items,
+ otx2_flow_parse_higig2_hdr,
otx2_flow_parse_la,
otx2_flow_parse_lb,
otx2_flow_parse_lc,
goto err_exit;
}
+ if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ rc = flow_update_sec_tt(dev, actions);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to update tt with sec act");
+ goto err_exit;
+ }
+ }
list = &hw->npc_flow.flow_list[flow->priority];
/* List in ascending order of mcam entries */
return NULL;
}
+static int
+otx2_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ struct otx2_mbox *mbox = hw->mbox;
+ struct rte_bitmap *bmap;
+ uint16_t match_id;
+ int rc;
+
+ match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
+ NIX_RX_ACT_MATCH_MASK;
+
+ if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
+ if (rte_atomic32_read(&npc->mark_actions) == 0)
+ return -EINVAL;
+
+ /* Clear mark offload flag if there are no more mark actions */
+ if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
+ hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ otx2_eth_set_rx_function(dev);
+ }
+ }
+
+ rc = flow_free_rss_action(dev, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to free rss action");
+ }
+
+ rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to destroy filter");
+ }
+
+ TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);
+
+ bmap = npc->live_entries[flow->priority];
+ rte_bitmap_clear(bmap, flow->mcam_id);
+
+ rte_free(flow);
+ return 0;
+}
+
+static int
+otx2_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ int rc;
+
+ rc = otx2_flow_free_all_resources(hw);
+ if (rc) {
+ otx2_err("Error when deleting NPC MCAM entries "
+ ", counters");
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to flush filter");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
+ int enable __rte_unused,
+ struct rte_flow_error *error)
+{
+ /*
+ * If we support, we need to un-install the default mcam
+ * entry for this port.
+ */
+
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow isolation not supported");
+
+ return -rte_errno;
+}
+
+static int
+otx2_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ struct rte_flow_query_count *query = data;
+ struct otx2_mbox *mbox = hw->mbox;
+ const char *errmsg = NULL;
+ int errcode = ENOTSUP;
+ int rc;
+
+ if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+ errmsg = "Only COUNT is supported in query";
+ goto err_exit;
+ }
+
+ if (flow->ctr_id == NPC_COUNTER_NONE) {
+ errmsg = "Counter is not available";
+ goto err_exit;
+ }
+
+ rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
+ if (rc != 0) {
+ errcode = EIO;
+ errmsg = "Error reading flow counter";
+ goto err_exit;
+ }
+ query->hits_set = 1;
+ query->bytes_set = 0;
+
+ if (query->reset)
+ rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
+ if (rc != 0) {
+ errcode = EIO;
+ errmsg = "Error clearing flow counter";
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ rte_flow_error_set(error, errcode,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ errmsg);
+ return -rte_errno;
+}
+
/* rte_flow driver callbacks implemented by this PMD. */
const struct rte_flow_ops otx2_flow_ops = {
	.validate = otx2_flow_validate,
	.create = otx2_flow_create,
	.destroy = otx2_flow_destroy,
	.flush = otx2_flow_flush,
	.query = otx2_flow_query,
	.isolate = otx2_flow_isolate,
};
+
/*
 * Convert a supported-nibble mask into a key length in bits: each set
 * bit in @supp_mask stands for one 4-bit nibble of the MCAM key.
 */
static int
flow_supp_key_len(uint32_t supp_mask)
{
	uint32_t m;
	int bits = 0;

	for (m = supp_mask; m != 0; m >>= 1)
		bits += (int)(m & 1u);

	return bits * 4;
}
+
+/* Refer HRM register:
+ * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
+ * and
+ * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
+ **/
+#define BYTESM1_SHIFT 16
+#define HDR_OFF_SHIFT 8
+static void
+flow_update_kex_info(struct npc_xtract_info *xtract_info,
+ uint64_t val)
+{
+ xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
+ xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
+ xtract_info->key_off = val & 0x3f;
+ xtract_info->enable = ((val >> 7) & 0x1);
+ xtract_info->flags_enable = ((val >> 6) & 0x1);
+}
+
/*
 * Decode the KEX config mailbox response into the driver's parse state:
 * supported-nibble masks and derived key lengths, MCAM key width, the
 * per-(intf, ld, flag) and per-(intf, lid, lt, ld) extraction
 * descriptors, and the two LDATA flag config words.
 */
static void
flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
		      struct npc_get_kex_cfg_rsp *kex_rsp)
{
	volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
		[NPC_MAX_LD];
	struct npc_xtract_info *x_info = NULL;
	int lid, lt, ld, fl, ix;
	otx2_dxcfg_t *p;
	uint64_t keyw;
	uint64_t val;

	/* Low 31 bits of the keyx config advertise the supported nibbles */
	npc->keyx_supp_nmask[NPC_MCAM_RX] =
		kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_supp_nmask[NPC_MCAM_TX] =
		kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_len[NPC_MCAM_RX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
	npc->keyx_len[NPC_MCAM_TX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

	/* Bits 34:32 select the MCAM key width */
	keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_RX] = keyw;
	keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_TX] = keyw;

	/* Update KEX_LD_FLAG */
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			for (fl = 0; fl < NPC_MAX_LFL; fl++) {
				x_info =
				    &npc->prx_fxcfg[ix][ld][fl].xtract[0];
				val = kex_rsp->intf_ld_flags[ix][ld][fl];
				flow_update_kex_info(x_info, val);
			}
		}
	}

	/* Update LID, LT and LDATA cfg */
	p = &npc->prx_dxcfg;
	/* View the flat mailbox payload as the 4-D register layout */
	q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
		(&kex_rsp->intf_lid_lt_ld);
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (lid = 0; lid < NPC_MAX_LID; lid++) {
			for (lt = 0; lt < NPC_MAX_LT; lt++) {
				for (ld = 0; ld < NPC_MAX_LD; ld++) {
					x_info = &(*p)[ix][lid][lt].xtract[ld];
					val = (*q)[ix][lid][lt][ld];
					flow_update_kex_info(x_info, val);
				}
			}
		}
	}
	/* Update LDATA Flags cfg */
	npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
	npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}
+
+static struct otx2_idev_kex_cfg *
+flow_intra_dev_kex_cfg(void)
+{
+ static const char name[] = "octeontx2_intra_device_kex_conf";
+ struct otx2_idev_kex_cfg *idev;
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(name);
+ if (mz)
+ return mz->addr;
+
+ /* Request for the first time */
+ mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
+ SOCKET_ID_ANY, 0, OTX2_ALIGN);
+ if (mz) {
+ idev = mz->addr;
+ rte_atomic16_set(&idev->kex_refcnt, 0);
+ return idev;
+ }
+ return NULL;
+}
+
/*
 * Make the NPC KEX configuration available to this device. The first
 * instance in the process (refcount transition 0 -> 1) fetches it from
 * the AF over mailbox into the shared intra-device area; later
 * instances reuse the cached copy. Also records the MKEX profile name
 * and builds the parse tables via flow_process_mkex_cfg().
 *
 * NOTE(review): on mailbox failure kex_refcnt stays incremented, so a
 * retry would skip the fetch and read a zeroed kex_cfg — confirm that
 * callers treat this error as fatal for the device.
 */
static int
flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
{
	struct otx2_npc_flow_info *npc = &dev->npc_flow;
	struct npc_get_kex_cfg_rsp *kex_rsp;
	struct otx2_mbox *mbox = dev->mbox;
	char mkex_pfl_name[MKEX_NAME_LEN];
	struct otx2_idev_kex_cfg *idev;
	int rc = 0;

	idev = flow_intra_dev_kex_cfg();
	if (!idev)
		return -ENOMEM;

	/* Is kex_cfg read by any another driver? */
	if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
		/* Call mailbox to get key & data size */
		(void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
		otx2_mbox_msg_send(mbox, 0);
		rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
		if (rc) {
			otx2_err("Failed to fetch NPC keyx config");
			goto done;
		}
		/* Cache the response for subsequent device instances */
		memcpy(&idev->kex_cfg, kex_rsp,
		       sizeof(struct npc_get_kex_cfg_rsp));
	}

	otx2_mbox_memcpy(mkex_pfl_name,
			 idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);

	strlcpy((char *)dev->mkex_pfl_name,
		mkex_pfl_name, sizeof(dev->mkex_pfl_name));

	flow_process_mkex_cfg(npc, &idev->kex_cfg);

done:
	return rc;
}
+
/*
 * One-time NPC flow setup for a port: fetch the KEX/MKEX configuration,
 * size the MCAM, and allocate the per-priority bookkeeping — four
 * bitmaps per priority (free, free-reverse, live, live-reverse entries)
 * carved out of one cache-aligned allocation, plus entry-info records,
 * flow lists, and the RSS group bitmap (group 0 reserved for the
 * default RSS configuration).
 *
 * Returns 0 on success, -ENOMEM (or a mailbox error) on failure; all
 * partially allocated arrays are released on the error path.
 */
int
otx2_flow_init(struct otx2_eth_dev *hw)
{
	uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	uint32_t bmap_sz;
	int rc = 0, idx;

	rc = flow_fetch_kex_cfg(hw);
	if (rc) {
		otx2_err("Failed to fetch NPC keyx config from idev");
		return rc;
	}

	rte_atomic32_init(&npc->mark_actions);

	/* Wider MCAM keys consume more banks, shrinking the entry count */
	npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX];
	/* Free, free_rev, live and live_rev entries */
	bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
	mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
			  RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		otx2_err("Bmap alloc failed");
		rc = -ENOMEM;
		return rc;
	}

	npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
					   * sizeof(struct otx2_mcam_ents_info),
					   0);
	if (npc->flow_entry_info == NULL) {
		otx2_err("flow_entry_info alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
					* sizeof(struct rte_bitmap *),
					0);
	if (npc->free_entries == NULL) {
		otx2_err("free_entries alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
					    * sizeof(struct rte_bitmap *),
					    0);
	if (npc->free_entries_rev == NULL) {
		otx2_err("free_entries_rev alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
					* sizeof(struct rte_bitmap *),
					0);
	if (npc->live_entries == NULL) {
		otx2_err("live_entries alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
					    * sizeof(struct rte_bitmap *),
					    0);
	if (npc->live_entries_rev == NULL) {
		otx2_err("live_entries_rev alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
				     * sizeof(struct otx2_flow_list),
				     0);
	if (npc->flow_list == NULL) {
		otx2_err("flow_list alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	/* Carve the single allocation into four bitmaps per priority.
	 * NOTE(review): rte_bitmap_init() return values are not checked.
	 */
	npc_mem = mem;
	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		TAILQ_INIT(&npc->flow_list[idx]);

		npc->free_entries[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->free_entries_rev[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->live_entries[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->live_entries_rev[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->flow_entry_info[idx].free_ent = 0;
		npc->flow_entry_info[idx].live_ent = 0;
		npc->flow_entry_info[idx].max_id = 0;
		npc->flow_entry_info[idx].min_id = ~(0);
	}

	npc->rss_grps = NIX_RSS_GRPS;

	bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
	nix_mem = rte_zmalloc(NULL, bmap_sz, RTE_CACHE_LINE_SIZE);
	if (nix_mem == NULL) {
		otx2_err("Bmap alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);

	/* Group 0 will be used for RSS,
	 * 1 -7 will be used for rte_flow RSS action
	 */
	rte_bitmap_set(npc->rss_grp_entries, 0);

	return 0;

err:
	if (npc->flow_list)
		rte_free(npc->flow_list);
	if (npc->live_entries_rev)
		rte_free(npc->live_entries_rev);
	if (npc->live_entries)
		rte_free(npc->live_entries);
	if (npc->free_entries_rev)
		rte_free(npc->free_entries_rev);
	if (npc->free_entries)
		rte_free(npc->free_entries);
	if (npc->flow_entry_info)
		rte_free(npc->flow_entry_info);
	if (npc_mem)
		rte_free(npc_mem);
	return rc;
}
+
+int
+otx2_flow_fini(struct otx2_eth_dev *hw)
+{
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ int rc;
+
+ rc = otx2_flow_free_all_resources(hw);
+ if (rc) {
+ otx2_err("Error when deleting NPC MCAM entries, counters");
+ return rc;
+ }
+
+ if (npc->flow_list)
+ rte_free(npc->flow_list);
+ if (npc->live_entries_rev)
+ rte_free(npc->live_entries_rev);
+ if (npc->live_entries)
+ rte_free(npc->live_entries);
+ if (npc->free_entries_rev)
+ rte_free(npc->free_entries_rev);
+ if (npc->free_entries)
+ rte_free(npc->free_entries);
+ if (npc->flow_entry_info)
+ rte_free(npc->flow_entry_info);
+
+ return 0;
+}