1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_ethdev.h"
/*
 * Build the NPC MCAM search-key data and mask for @pst->flow from the
 * per-layer parse results, copy them into the flow's mcam_data/mcam_mask,
 * and hand off to otx2_flow_mcam_alloc_and_write() to allocate and program
 * the hardware MCAM entry over the mailbox.
 *
 * NOTE(review): this view of the file is elided (several statements between
 * the visible lines are missing), so comments below describe only what the
 * visible code establishes.
 */
9 flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
10 struct otx2_npc_flow_info *flow_info)
12 /* This is non-LDATA part in search key */
13 uint64_t key_data[2] = {0ULL, 0ULL};
14 uint64_t key_mask[2] = {0ULL, 0ULL};
15 int intf = pst->flow->nix_intf;
16 int key_len, bit = 0, index;
17 int off, idx, data_off = 0;
18 uint8_t lid, mask, data;
23 /* Skip till Layer A data start */
24 while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
25 if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
30 /* Each bit represents 1 nibble */
/*
 * Walk every layer id; for each, pick up the layer type (low nibble) and
 * flags byte recorded by the parse stages, then look at the 3-bit
 * layer_info field of the supported-nibble mask for this interface.
 */
34 for (lid = 0; lid < NPC_MAX_LID; lid++) {
36 off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
37 lt = pst->lt[lid] & 0xf;
38 flags = pst->flags[lid] & 0xff;
41 layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);
/* One nibble per set bit in layer_info (idx 0..2). */
44 for (idx = 0; idx <= 2 ; idx++) {
45 if (layer_info & (1 << idx)) {
49 data = ((flags >> 4) & 0xf);
/* OR the nibble into the 128-bit key/mask pair; the shift
 * amount is on an elided line — presumably derived from
 * data_off. TODO confirm against full source.
 */
57 key_data[index] |= ((uint64_t)data <<
62 key_mask[index] |= ((uint64_t)mask <<
70 otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
71 key_data[0], key_data[1]);
73 /* Copy this into mcam string */
/* keyx_len is in bits here? The +7/8 rounding suggests bits->bytes —
 * NOTE(review): confirm keyx_len units against the full driver.
 */
74 key_len = (pst->npc->keyx_len[intf] + 7) / 8;
75 otx2_npc_dbg("Key_len = %d", key_len);
76 memcpy(pst->flow->mcam_data, key_data, key_len);
77 memcpy(pst->flow->mcam_mask, key_mask, key_len);
79 otx2_npc_dbg("Final flow data");
80 for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
81 otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
82 idx, pst->flow->mcam_data[idx],
83 idx, pst->flow->mcam_mask[idx]);
87 * Now we have mcam data and mask formatted as
88 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
89 * hole is present if key_len is odd number of nibbles.
90 * mcam data must be split into 64 bits + 48 bits segments
91 * for each back W0, W1.
/* Allocate an MCAM entry and write the prepared key through the mbox. */
94 return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
/*
 * Validate rte_flow attributes and record direction/priority on @flow.
 *
 * Rejects (via rte_flow_error_set with ENOTSUP): NULL/empty attr, non-zero
 * group, priority outside [0, flow_max_priority), and anything other than
 * exactly one of ingress/egress. On success sets flow->nix_intf to
 * OTX2_INTF_RX or OTX2_INTF_TX and copies the priority.
 *
 * NOTE(review): some lines (e.g. the NULL-attr check before L51 and the
 * returns) are elided from this view.
 */
98 flow_parse_attr(struct rte_eth_dev *eth_dev,
99 const struct rte_flow_attr *attr,
100 struct rte_flow_error *error,
101 struct rte_flow *flow)
103 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
104 const char *errmsg = NULL;
107 errmsg = "Attribute can't be empty";
108 else if (attr->group)
109 errmsg = "Groups are not supported";
110 else if (attr->priority >= dev->npc_flow.flow_max_priority)
111 errmsg = "Priority should be with in specified range";
112 else if ((!attr->egress && !attr->ingress) ||
113 (attr->egress && attr->ingress))
114 errmsg = "Exactly one of ingress or egress must be set";
/* Single error-reporting point for all of the checks above. */
116 if (errmsg != NULL) {
117 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
/* Direction selection — presumably keyed on attr->ingress; the
 * condition lines are elided. TODO confirm.
 */
123 flow->nix_intf = OTX2_INTF_RX;
125 flow->nix_intf = OTX2_INTF_TX;
127 flow->priority = attr->priority;
/*
 * Find the first clear bit in @bmap within [0, size) and report it in *pos.
 *
 * Returns 0 when a free group was found (*pos holds its index), -1 when
 * every group is in use. The loop body's exit statement (presumably a
 * break) is on an elided line.
 */
132 flow_get_free_rss_grp(struct rte_bitmap *bmap,
133 uint32_t size, uint32_t *pos)
135 for (*pos = 0; *pos < size; ++*pos) {
136 if (!rte_bitmap_get(bmap, *pos))
140 return *pos < size ? 0 : -1;
/*
 * Program the NIX RSS state needed by an RTE_FLOW_ACTION_TYPE_RSS action:
 * pick a free RSS group, install the key, build/replicate the indirection
 * table, select the hash-function (flowkey) algorithm, and mark the group
 * as used. Outputs the chosen algorithm index and group via @alg_idx and
 * @rss_grp.
 *
 * NOTE(review): several lines (rc declaration, error returns, the rss->key
 * NULL check around L81) are elided from this view.
 */
144 flow_configure_rss_action(struct otx2_eth_dev *dev,
145 const struct rte_flow_action_rss *rss,
146 uint8_t *alg_idx, uint32_t *rss_grp,
149 struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
150 uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
151 uint32_t flowkey_cfg, grp_aval, i;
152 uint16_t *ind_tbl = NULL;
153 uint8_t flowkey_algx;
156 rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
157 flow_info->rss_grps, &grp_aval);
158 /* RSS group :0 is not usable for flow rss action */
159 if (rc < 0 || grp_aval == 0)
/* Install the user-supplied hash key (cast drops const for the
 * driver API). Presumably guarded by an elided rss->key check.
 */
164 otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
167 /* If queue count passed in the rss action is less than
168 * HW configured reta size, replicate rss action reta
169 * across HW reta table.
171 if (dev->rss_info.rss_size > rss->queue_num) {
/* Whole-copy replication ... */
174 for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
175 memcpy(reta + i * rss->queue_num, rss->queue,
176 sizeof(uint16_t) * rss->queue_num);
/* ... plus the tail remainder so the full reta is covered. */
178 i = dev->rss_info.rss_size % rss->queue_num;
180 memcpy(&reta[dev->rss_info.rss_size] - i,
181 rss->queue, i * sizeof(uint16_t));
/* Queue list already covers the reta size; use it directly. */
183 ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
186 rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
188 otx2_err("Failed to init rss table rc = %d", rc);
/* Translate rte_flow RSS types/level into the NIX flowkey config. */
192 flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);
194 rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
195 *rss_grp, mcam_index);
197 otx2_err("Failed to set rss hash function rc = %d", rc);
201 *alg_idx = flowkey_algx;
/* Reserve the group so subsequent flows pick a different one. */
203 rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);
/*
 * Scan the action list for RTE_FLOW_ACTION_TYPE_RSS; when found, configure
 * the RSS group/algorithm via flow_configure_rss_action() and fold the
 * resulting alg index and group into the flow's NPC RSS action word.
 *
 * NOTE(review): the declarations of rc/alg_idx/rss_grp, the error check
 * after the configure call, and the npc_action assignment target are on
 * elided lines.
 */
210 flow_program_rss_action(struct rte_eth_dev *eth_dev,
211 const struct rte_flow_action actions[],
212 struct rte_flow *flow)
214 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
215 const struct rte_flow_action_rss *rss;
220 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
221 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
222 rss = (const struct rte_flow_action_rss *)actions->conf;
224 rc = flow_configure_rss_action(dev,
225 rss, &alg_idx, &rss_grp,
/* Pack alg index and group into their NIX_RSS_ACT_* bit fields;
 * presumably OR-ed into flow->npc_action (assignment lhs elided).
 */
231 ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
232 NIX_RSS_ACT_ALG_OFFSET) |
233 ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
234 NIX_RSS_ACT_GRP_OFFSET);
/*
 * Meta-item parse stage: nothing to consume; just logs. Return statement
 * is on an elided line.
 */
241 flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
243 otx2_npc_dbg("Meta Item");
248 * Parse function of each layer:
249 * - Consume one or more patterns that are relevant.
250 * - Update parse_state
251 * - Set parse_state.pattern = last item consumed
252 * - Set appropriate error code/message when returning error.
/* Signature shared by all per-layer parse stages (see array in
 * flow_parse_pattern).
 */
254 typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);
/*
 * Run the pattern items through the ordered per-layer parse stages,
 * initializing @pst and pointing its mcam_data/mcam_mask cursors at the
 * flow's key buffers. Fails with EINVAL for a NULL pattern and ENOTSUP if
 * items remain after all stages have run.
 *
 * NOTE(review): the parse_stage_funcs array is truncated here (only the
 * meta stage is visible), and several declarations/returns are elided.
 */
257 flow_parse_pattern(struct rte_eth_dev *dev,
258 const struct rte_flow_item pattern[],
259 struct rte_flow_error *error,
260 struct rte_flow *flow,
261 struct otx2_parse_state *pst)
263 flow_parse_stage_func_t parse_stage_funcs[] = {
264 flow_parse_meta_items,
274 struct otx2_eth_dev *hw = dev->data->dev_private;
279 if (pattern == NULL) {
280 rte_flow_error_set(error, EINVAL,
281 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
286 memset(pst, 0, sizeof(*pst));
287 pst->npc = &hw->npc_flow;
291 /* Use integral byte offset */
292 key_offset = pst->npc->keyx_len[flow->nix_intf];
293 key_offset = (key_offset + 7) / 8;
295 /* Location where LDATA would begin */
296 pst->mcam_data = (uint8_t *)flow->mcam_data;
297 pst->mcam_mask = (uint8_t *)flow->mcam_mask;
/* Each iteration runs one parse stage; the stage advances
 * pst->pattern past whatever it consumed.
 */
299 while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
300 layer < RTE_DIM(parse_stage_funcs)) {
301 otx2_npc_dbg("Pattern type = %d", pattern->type);
303 /* Skip place-holders */
304 pattern = otx2_flow_skip_void_and_any_items(pattern);
306 pst->pattern = pattern;
307 otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
308 rc = parse_stage_funcs[layer](pst);
315 * Parse stage function sets pst->pattern to
316 * 1 past the last item it consumed.
318 pattern = pst->pattern;
324 /* Skip trailing place-holders */
325 pattern = otx2_flow_skip_void_and_any_items(pattern);
327 /* Are there more items than what we can handle? */
328 if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
329 rte_flow_error_set(error, ENOTSUP,
330 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
331 "unsupported item in the sequence");
/*
 * Full rule parse: attributes, then actions, then pattern, each stage
 * short-circuiting on error (the early-return lines are elided from this
 * view). Shared by validate and create paths.
 */
339 flow_parse_rule(struct rte_eth_dev *dev,
340 const struct rte_flow_attr *attr,
341 const struct rte_flow_item pattern[],
342 const struct rte_flow_action actions[],
343 struct rte_flow_error *error,
344 struct rte_flow *flow,
345 struct otx2_parse_state *pst)
349 /* Check attributes */
350 err = flow_parse_attr(dev, attr, error, flow);
355 err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
360 err = flow_parse_pattern(dev, pattern, error, flow, pst);
364 /* Check for overlaps? */
/*
 * rte_flow validate op: run the full parse against a throwaway stack flow
 * without programming any hardware state.
 */
369 otx2_flow_validate(struct rte_eth_dev *dev,
370 const struct rte_flow_attr *attr,
371 const struct rte_flow_item pattern[],
372 const struct rte_flow_action actions[],
373 struct rte_flow_error *error)
375 struct otx2_parse_state parse_state;
376 struct rte_flow flow;
378 memset(&flow, 0, sizeof(flow));
379 return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
/*
 * rte_flow create op: allocate a flow object, parse the rule, program the
 * NPC MCAM entry and any RSS action, then insert the flow into the
 * per-priority list ordered by ascending mcam_id.
 *
 * NOTE(review): error-path cleanup (freeing @flow on failure) and the
 * rc checks after the program calls are on elided lines — verify against
 * the full source that no leak exists on the failure paths.
 */
383 static struct rte_flow *
384 otx2_flow_create(struct rte_eth_dev *dev,
385 const struct rte_flow_attr *attr,
386 const struct rte_flow_item pattern[],
387 const struct rte_flow_action actions[],
388 struct rte_flow_error *error)
390 struct otx2_eth_dev *hw = dev->data->dev_private;
391 struct otx2_parse_state parse_state;
392 struct otx2_mbox *mbox = hw->mbox;
393 struct rte_flow *flow, *flow_iter;
394 struct otx2_flow_list *list;
397 flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
399 rte_flow_error_set(error, ENOMEM,
400 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
402 "Memory allocation failed");
/* rte_zmalloc already zeroes; this memset is redundant but harmless. */
405 memset(flow, 0, sizeof(*flow));
407 rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
412 rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
414 rte_flow_error_set(error, EIO,
415 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
417 "Failed to insert filter");
421 rc = flow_program_rss_action(dev, actions, flow);
423 rte_flow_error_set(error, EIO,
424 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
426 "Failed to program rss action");
431 list = &hw->npc_flow.flow_list[flow->priority];
432 /* List in ascending order of mcam entries */
433 TAILQ_FOREACH(flow_iter, list, next) {
434 if (flow_iter->mcam_id > flow->mcam_id) {
435 TAILQ_INSERT_BEFORE(flow_iter, flow, next);
/* No larger mcam_id found — append at the tail. */
440 TAILQ_INSERT_TAIL(list, flow, next);
/* rte_flow ops table exported to the ethdev layer (truncated in this
 * view; presumably also wires .destroy/.flush etc. — confirm in full
 * source).
 */
448 const struct rte_flow_ops otx2_flow_ops = {
449 .validate = otx2_flow_validate,
450 .create = otx2_flow_create,