/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_rte_flow.h>
6 const struct cnxk_rte_flow_term_info term[] = {
7 [RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH,
8 sizeof(struct rte_flow_item_eth)},
9 [RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN,
10 sizeof(struct rte_flow_item_vlan)},
11 [RTE_FLOW_ITEM_TYPE_E_TAG] = {ROC_NPC_ITEM_TYPE_E_TAG,
12 sizeof(struct rte_flow_item_e_tag)},
13 [RTE_FLOW_ITEM_TYPE_IPV4] = {ROC_NPC_ITEM_TYPE_IPV4,
14 sizeof(struct rte_flow_item_ipv4)},
15 [RTE_FLOW_ITEM_TYPE_IPV6] = {ROC_NPC_ITEM_TYPE_IPV6,
16 sizeof(struct rte_flow_item_ipv6)},
17 [RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4] = {
18 ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4,
19 sizeof(struct rte_flow_item_arp_eth_ipv4)},
20 [RTE_FLOW_ITEM_TYPE_MPLS] = {ROC_NPC_ITEM_TYPE_MPLS,
21 sizeof(struct rte_flow_item_mpls)},
22 [RTE_FLOW_ITEM_TYPE_ICMP] = {ROC_NPC_ITEM_TYPE_ICMP,
23 sizeof(struct rte_flow_item_icmp)},
24 [RTE_FLOW_ITEM_TYPE_UDP] = {ROC_NPC_ITEM_TYPE_UDP,
25 sizeof(struct rte_flow_item_udp)},
26 [RTE_FLOW_ITEM_TYPE_TCP] = {ROC_NPC_ITEM_TYPE_TCP,
27 sizeof(struct rte_flow_item_tcp)},
28 [RTE_FLOW_ITEM_TYPE_SCTP] = {ROC_NPC_ITEM_TYPE_SCTP,
29 sizeof(struct rte_flow_item_sctp)},
30 [RTE_FLOW_ITEM_TYPE_ESP] = {ROC_NPC_ITEM_TYPE_ESP,
31 sizeof(struct rte_flow_item_esp)},
32 [RTE_FLOW_ITEM_TYPE_GRE] = {ROC_NPC_ITEM_TYPE_GRE,
33 sizeof(struct rte_flow_item_gre)},
34 [RTE_FLOW_ITEM_TYPE_NVGRE] = {ROC_NPC_ITEM_TYPE_NVGRE,
35 sizeof(struct rte_flow_item_nvgre)},
36 [RTE_FLOW_ITEM_TYPE_VXLAN] = {ROC_NPC_ITEM_TYPE_VXLAN,
37 sizeof(struct rte_flow_item_vxlan)},
38 [RTE_FLOW_ITEM_TYPE_GTPC] = {ROC_NPC_ITEM_TYPE_GTPC,
39 sizeof(struct rte_flow_item_gtp)},
40 [RTE_FLOW_ITEM_TYPE_GTPU] = {ROC_NPC_ITEM_TYPE_GTPU,
41 sizeof(struct rte_flow_item_gtp)},
42 [RTE_FLOW_ITEM_TYPE_GENEVE] = {ROC_NPC_ITEM_TYPE_GENEVE,
43 sizeof(struct rte_flow_item_geneve)},
44 [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {
45 ROC_NPC_ITEM_TYPE_VXLAN_GPE,
46 sizeof(struct rte_flow_item_vxlan_gpe)},
47 [RTE_FLOW_ITEM_TYPE_IPV6_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_EXT,
48 sizeof(struct rte_flow_item_ipv6_ext)},
49 [RTE_FLOW_ITEM_TYPE_VOID] = {ROC_NPC_ITEM_TYPE_VOID, 0},
50 [RTE_FLOW_ITEM_TYPE_ANY] = {ROC_NPC_ITEM_TYPE_ANY, 0},
51 [RTE_FLOW_ITEM_TYPE_GRE_KEY] = {ROC_NPC_ITEM_TYPE_GRE_KEY,
53 [RTE_FLOW_ITEM_TYPE_HIGIG2] = {ROC_NPC_ITEM_TYPE_HIGIG2,
54 sizeof(struct rte_flow_item_higig2_hdr)},
55 [RTE_FLOW_ITEM_TYPE_RAW] = {ROC_NPC_ITEM_TYPE_RAW,
56 sizeof(struct rte_flow_item_raw)}};
59 npc_rss_action_validate(struct rte_eth_dev *eth_dev,
60 const struct rte_flow_attr *attr,
61 const struct rte_flow_action *act)
63 const struct rte_flow_action_rss *rss;
65 rss = (const struct rte_flow_action_rss *)act->conf;
68 plt_err("No support of RSS in egress");
72 if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
73 plt_err("multi-queue mode is disabled");
77 if (!rss || !rss->queue_num) {
78 plt_err("no valid queues");
82 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
83 plt_err("non-default RSS hash functions are not supported");
87 if (rss->key_len && rss->key_len > ROC_NIX_RSS_KEY_LEN) {
88 plt_err("RSS hash key too large");
96 npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev,
97 const struct roc_npc_action *rss_action,
98 uint32_t *flowkey_cfg)
100 const struct roc_npc_action_rss *rss;
102 rss = (const struct roc_npc_action_rss *)rss_action->conf;
104 *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss->types, rss->level);
108 cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
109 const struct rte_flow_action actions[],
110 struct roc_npc_action in_actions[], uint32_t *flowkey_cfg)
112 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
113 const struct rte_flow_action_count *act_count;
114 const struct rte_flow_action_queue *act_q;
118 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
119 switch (actions->type) {
120 case RTE_FLOW_ACTION_TYPE_VOID:
121 in_actions[i].type = ROC_NPC_ACTION_TYPE_VOID;
124 case RTE_FLOW_ACTION_TYPE_MARK:
125 in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK;
126 in_actions[i].conf = actions->conf;
129 case RTE_FLOW_ACTION_TYPE_FLAG:
130 in_actions[i].type = ROC_NPC_ACTION_TYPE_FLAG;
133 case RTE_FLOW_ACTION_TYPE_COUNT:
134 act_count = (const struct rte_flow_action_count *)
137 if (act_count->shared == 1) {
138 plt_npc_dbg("Shared counter is not supported");
141 in_actions[i].type = ROC_NPC_ACTION_TYPE_COUNT;
144 case RTE_FLOW_ACTION_TYPE_DROP:
145 in_actions[i].type = ROC_NPC_ACTION_TYPE_DROP;
148 case RTE_FLOW_ACTION_TYPE_PF:
149 in_actions[i].type = ROC_NPC_ACTION_TYPE_PF;
152 case RTE_FLOW_ACTION_TYPE_VF:
153 in_actions[i].type = ROC_NPC_ACTION_TYPE_VF;
154 in_actions[i].conf = actions->conf;
157 case RTE_FLOW_ACTION_TYPE_QUEUE:
158 act_q = (const struct rte_flow_action_queue *)
161 if (rq >= eth_dev->data->nb_rx_queues) {
162 plt_npc_dbg("Invalid queue index");
165 in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE;
166 in_actions[i].conf = actions->conf;
169 case RTE_FLOW_ACTION_TYPE_RSS:
170 rc = npc_rss_action_validate(eth_dev, attr, actions);
173 in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS;
174 in_actions[i].conf = actions->conf;
175 npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg);
178 case RTE_FLOW_ACTION_TYPE_SECURITY:
179 in_actions[i].type = ROC_NPC_ACTION_TYPE_SEC;
181 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
182 in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
184 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
185 in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_INSERT;
186 in_actions[i].conf = actions->conf;
188 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
190 ROC_NPC_ACTION_TYPE_VLAN_ETHTYPE_INSERT;
191 in_actions[i].conf = actions->conf;
193 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
195 ROC_NPC_ACTION_TYPE_VLAN_PCP_INSERT;
196 in_actions[i].conf = actions->conf;
199 plt_npc_dbg("Action is not supported = %d",
205 in_actions[i].type = ROC_NPC_ACTION_TYPE_END;
213 cnxk_map_flow_data(struct rte_eth_dev *eth_dev,
214 const struct rte_flow_attr *attr,
215 const struct rte_flow_item pattern[],
216 const struct rte_flow_action actions[],
217 struct roc_npc_attr *in_attr,
218 struct roc_npc_item_info in_pattern[],
219 struct roc_npc_action in_actions[], uint32_t *flowkey_cfg)
223 in_attr->priority = attr->priority;
224 in_attr->ingress = attr->ingress;
225 in_attr->egress = attr->egress;
227 while (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
228 in_pattern[i].spec = pattern->spec;
229 in_pattern[i].last = pattern->last;
230 in_pattern[i].mask = pattern->mask;
231 in_pattern[i].type = term[pattern->type].item_type;
232 in_pattern[i].size = term[pattern->type].item_size;
236 in_pattern[i].type = ROC_NPC_ITEM_TYPE_END;
238 return cnxk_map_actions(eth_dev, attr, actions, in_actions,
243 cnxk_flow_validate(struct rte_eth_dev *eth_dev,
244 const struct rte_flow_attr *attr,
245 const struct rte_flow_item pattern[],
246 const struct rte_flow_action actions[],
247 struct rte_flow_error *error)
249 struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1];
250 struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT];
251 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
252 struct roc_npc *npc = &dev->npc;
253 struct roc_npc_attr in_attr;
254 struct roc_npc_flow flow;
255 uint32_t flowkey_cfg = 0;
258 memset(&flow, 0, sizeof(flow));
260 rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr,
261 in_pattern, in_actions, &flowkey_cfg);
263 rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
264 NULL, "Failed to map flow data");
268 return roc_npc_flow_parse(npc, &in_attr, in_pattern, in_actions, &flow);
271 struct roc_npc_flow *
272 cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
273 const struct rte_flow_item pattern[],
274 const struct rte_flow_action actions[],
275 struct rte_flow_error *error)
277 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
278 struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1];
279 struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT];
280 struct roc_npc *npc = &dev->npc;
281 struct roc_npc_attr in_attr;
282 struct roc_npc_flow *flow;
286 rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr,
287 in_pattern, in_actions,
288 &npc->flowkey_cfg_state);
290 rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
291 NULL, "Failed to map flow data");
295 flow = roc_npc_flow_create(npc, &in_attr, in_pattern, in_actions,
298 rte_flow_error_set(error, errcode, errcode, NULL,
299 roc_error_msg_get(errcode));
307 cnxk_flow_destroy(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow,
308 struct rte_flow_error *error)
310 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
311 struct roc_npc *npc = &dev->npc;
314 rc = roc_npc_flow_destroy(npc, flow);
316 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
317 NULL, "Flow Destroy failed");
322 cnxk_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error)
324 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
325 struct roc_npc *npc = &dev->npc;
328 rc = roc_npc_mcam_free_all_resources(npc);
330 rte_flow_error_set(error, EIO, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
331 NULL, "Failed to flush filter");
339 cnxk_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
340 const struct rte_flow_action *action, void *data,
341 struct rte_flow_error *error)
343 struct roc_npc_flow *in_flow = (struct roc_npc_flow *)flow;
344 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
345 struct roc_npc *npc = &dev->npc;
346 struct rte_flow_query_count *query = data;
347 const char *errmsg = NULL;
348 int errcode = ENOTSUP;
351 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
352 errmsg = "Only COUNT is supported in query";
356 if (in_flow->ctr_id == NPC_COUNTER_NONE) {
357 errmsg = "Counter is not available";
361 rc = roc_npc_mcam_read_counter(npc, in_flow->ctr_id, &query->hits);
364 errmsg = "Error reading flow counter";
368 query->bytes_set = 0;
371 rc = roc_npc_mcam_clear_counter(npc, in_flow->ctr_id);
374 errmsg = "Error clearing flow counter";
381 rte_flow_error_set(error, errcode, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
387 cnxk_flow_isolate(struct rte_eth_dev *eth_dev __rte_unused,
388 int enable __rte_unused, struct rte_flow_error *error)
390 /* If we support, we need to un-install the default mcam
391 * entry for this port.
394 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
395 NULL, "Flow isolation not supported");
401 cnxk_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
402 FILE *file, struct rte_flow_error *error)
404 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
405 struct roc_npc *npc = &dev->npc;
408 rte_flow_error_set(error, EINVAL,
409 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
415 rte_flow_error_set(error, EINVAL,
416 RTE_FLOW_ERROR_TYPE_HANDLE,
422 roc_npc_flow_dump(file, npc);
427 struct rte_flow_ops cnxk_flow_ops = {
428 .validate = cnxk_flow_validate,
429 .flush = cnxk_flow_flush,
430 .query = cnxk_flow_query,
431 .isolate = cnxk_flow_isolate,
432 .dev_dump = cnxk_flow_dev_dump,