/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell.
 */
4 #include <cnxk_rte_flow.h>
5 #include "cn10k_rte_flow.h"
6 #include "cn10k_ethdev.h"
10 cn10k_mtr_connect(struct rte_eth_dev *eth_dev, uint32_t mtr_id)
12 return nix_mtr_connect(eth_dev, mtr_id);
16 cn10k_mtr_configure(struct rte_eth_dev *eth_dev,
17 const struct rte_flow_action actions[])
19 uint32_t mtr_id = 0xffff, prev_mtr_id = 0xffff, next_mtr_id = 0xffff;
20 const struct rte_flow_action_meter *mtr_conf;
21 const struct rte_flow_action_queue *q_conf;
22 const struct rte_flow_action_rss *rss_conf;
23 struct cnxk_mtr_policy_node *policy;
24 bool is_mtr_act = false;
28 for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
29 if (actions[i].type == RTE_FLOW_ACTION_TYPE_METER) {
30 mtr_conf = (const struct rte_flow_action_meter
32 mtr_id = mtr_conf->mtr_id;
35 if (actions[i].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
36 q_conf = (const struct rte_flow_action_queue
39 nix_mtr_rq_update(eth_dev, mtr_id, 1,
42 if (actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
43 rss_conf = (const struct rte_flow_action_rss
46 nix_mtr_rq_update(eth_dev, mtr_id,
57 while (next_mtr_id != 0xffff) {
58 rc = nix_mtr_validate(eth_dev, next_mtr_id);
62 rc = nix_mtr_policy_act_get(eth_dev, next_mtr_id, &policy);
66 rc = nix_mtr_color_action_validate(eth_dev, mtr_id,
67 &prev_mtr_id, &next_mtr_id,
73 return nix_mtr_configure(eth_dev, mtr_id);
77 cn10k_rss_action_validate(struct rte_eth_dev *eth_dev,
78 const struct rte_flow_attr *attr,
79 const struct rte_flow_action *act)
81 const struct rte_flow_action_rss *rss;
86 rss = (const struct rte_flow_action_rss *)act->conf;
89 plt_err("No support of RSS in egress");
93 if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
94 plt_err("multi-queue mode is disabled");
98 if (!rss || !rss->queue_num) {
99 plt_err("no valid queues");
103 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
104 plt_err("non-default RSS hash functions are not supported");
108 if (rss->key_len && rss->key_len > ROC_NIX_RSS_KEY_LEN) {
109 plt_err("RSS hash key too large");
117 cn10k_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
118 const struct rte_flow_item pattern[],
119 const struct rte_flow_action actions[],
120 struct rte_flow_error *error)
122 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
123 const struct rte_flow_action *action_rss = NULL;
124 const struct rte_flow_action_meter *mtr = NULL;
125 const struct rte_flow_action *act_q = NULL;
126 int mark_actions = 0, vtag_actions = 0;
127 struct roc_npc *npc = &dev->npc;
128 struct roc_npc_flow *flow;
129 uint32_t req_act = 0;
132 for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
133 if (actions[i].type == RTE_FLOW_ACTION_TYPE_METER)
134 req_act |= ROC_NPC_ACTION_TYPE_METER;
136 if (actions[i].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
137 req_act |= ROC_NPC_ACTION_TYPE_QUEUE;
140 if (actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
141 req_act |= ROC_NPC_ACTION_TYPE_RSS;
142 action_rss = &actions[i];
146 if (req_act & ROC_NPC_ACTION_TYPE_METER) {
147 if ((req_act & ROC_NPC_ACTION_TYPE_RSS) &&
148 ((req_act & ROC_NPC_ACTION_TYPE_QUEUE))) {
151 if (req_act & ROC_NPC_ACTION_TYPE_RSS) {
152 rc = cn10k_rss_action_validate(eth_dev, attr,
156 } else if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) {
157 const struct rte_flow_action_queue *act_queue;
158 act_queue = (const struct rte_flow_action_queue *)
160 if (act_queue->index > eth_dev->data->nb_rx_queues)
167 for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
168 if (actions[i].type == RTE_FLOW_ACTION_TYPE_METER) {
169 mtr = (const struct rte_flow_action_meter *)actions[i]
171 rc = cn10k_mtr_configure(eth_dev, actions);
173 rte_flow_error_set(error, rc,
174 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
175 "Failed to configure mtr ");
182 flow = cnxk_flow_create(eth_dev, attr, pattern, actions, error);
185 nix_mtr_chain_reset(eth_dev, mtr->mtr_id);
190 cn10k_mtr_connect(eth_dev, mtr->mtr_id);
193 mark_actions = roc_npc_mark_actions_get(npc);
196 dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
197 cn10k_eth_set_rx_function(eth_dev);
200 vtag_actions = roc_npc_vtag_actions_get(npc);
203 dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
204 cn10k_eth_set_rx_function(eth_dev);
207 return (struct rte_flow *)flow;
211 cn10k_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *rte_flow,
212 struct rte_flow_error *error)
214 struct roc_npc_flow *flow = (struct roc_npc_flow *)rte_flow;
215 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
216 int mark_actions = 0, vtag_actions = 0;
217 struct roc_npc *npc = &dev->npc;
219 mark_actions = roc_npc_mark_actions_get(npc);
221 mark_actions = roc_npc_mark_actions_sub_return(npc, 1);
222 if (mark_actions == 0) {
223 dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
224 cn10k_eth_set_rx_function(eth_dev);
228 vtag_actions = roc_npc_vtag_actions_get(npc);
230 if (flow->nix_intf == ROC_NPC_INTF_RX) {
231 vtag_actions = roc_npc_vtag_actions_sub_return(npc, 1);
232 if (vtag_actions == 0) {
233 dev->rx_offload_flags &=
234 ~NIX_RX_OFFLOAD_VLAN_STRIP_F;
235 cn10k_eth_set_rx_function(eth_dev);
240 return cnxk_flow_destroy(eth_dev, flow, error);