net/hns3: move declarations in flow header file
drivers/net/hns3/hns3_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_flow.h"

/* Default hash key */
static uint8_t hns3_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special filter ID for flagging non-specific packets; don't change the value. */
#define HNS3_MAX_FILTER_ID      0x0FFF

#define ETHER_TYPE_MASK         0xFFFF
#define IPPROTO_MASK            0xFF
#define TUNNEL_TYPE_MASK        0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE          0x6558

static enum rte_flow_item_type first_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type L2_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type tunnel_next_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN
};

struct items_step_mngr {
        enum rte_flow_item_type *items;
        int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = rte_be_to_cpu_32(src[i]);
}

/*
 * This function is used to find the RSS general action.
 * 1. RSS is used to spread packets among several queues. The flow API
 *    provides struct rte_flow_action_rss; the user can configure its
 *    fields, such as func/level/types/key/queue, to control the RSS
 *    function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create one
 *    FDIR rule whose action is an RSS queue region.
 * 3. When the action is RSS, the following rule distinguishes the two cases:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, indicating
 *            a queue region configuration.
 *    Other cases: an RSS general action.
 */
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[])
{
        const struct rte_flow_action *act = NULL;
        const struct hns3_rss_conf *rss;
        bool have_eth = false;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        act = actions;
                        break;
                }
        }
        if (!act)
                return NULL;

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        have_eth = true;
                        break;
                }
        }

        rss = act->conf;
        if (have_eth && rss->conf.queue_num) {
                /*
                 * The pattern has ETH and the action's queue_num > 0,
                 * indicating a queue region configuration.
                 * Because queue region is implemented by FDIR + RSS in hns3
                 * hardware, it must go through the FDIR process, so return
                 * NULL here to avoid entering the RSS process.
                 */
                return NULL;
        }

        return act;
}
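
/*
 * Illustrative sketch (not part of the driver): how the two cases above
 * look from the application side; queue IDs are placeholders.
 *
 * Case 1 - queue region (pattern has ETH and queue_num > 0), handled by
 * the FDIR process:
 *
 *	uint16_t region[4] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = { .queue_num = 4, .queue = region };
 *
 * Other cases - general RSS action (queue_num == 0), handled by the RSS
 * process:
 *
 *	struct rte_flow_action_rss rss = { .types = RTE_ETH_RSS_IP };
 */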

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt;

        LIST_FOREACH(cnt, &pf->flow_counters, next) {
                if (cnt->id == id)
                        return cnt;
        }
        return NULL;
}

static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
                 struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt) {
                if (!cnt->shared || cnt->shared != shared)
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                cnt,
                                "Counter id is used, shared flag does not match");
                cnt->ref_cnt++;
                return 0;
        }

        /* Clear the counter by read ops because the counter is read-clear */
        ret = hns3_get_count(hw, id, &value);
        if (ret)
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Clear counter failed!");

        cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
        if (cnt == NULL)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
                                          "Alloc mem for counter failed");
        cnt->id = id;
        cnt->shared = shared;
        cnt->ref_cnt = 1;
        cnt->hits = 0;
        LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
        return 0;
}

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_query_count *qc,
                   struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir is not supported in VF");
        cnt = hns3_counter_lookup(dev, flow->counter_id);
        if (cnt == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Can't find counter id");

        ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
        if (ret) {
                rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Read counter failed.");
                return ret;
        }
        qc->hits_set = 1;
        qc->hits = value;
        qc->bytes_set = 0;
        qc->bytes = 0;

        return 0;
}
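
/*
 * Usage sketch (assumption: a generic rte_flow application, not driver
 * code; port_id and flow are placeholders). A flow created with a COUNT
 * action can be queried through the generic API, which lands in
 * hns3_counter_query() above. Note that only hit counts are reported;
 * byte counts are left unset.
 *
 *	struct rte_flow_query_count qc = { .reset = 0 };
 *	struct rte_flow_action count_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &count_act, &qc, &err) == 0)
 *		printf("hits: %" PRIu64 "\n", qc.hits);
 */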

static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt == NULL) {
                hns3_err(hw, "Can't find available counter to release");
                return -EINVAL;
        }
        cnt->ref_cnt--;
        if (cnt->ref_cnt == 0) {
                LIST_REMOVE(cnt, next);
                rte_free(cnt);
        }
        return 0;
}

static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt_ptr;

        cnt_ptr = LIST_FIRST(&pf->flow_counters);
        while (cnt_ptr) {
                LIST_REMOVE(cnt_ptr, next);
                rte_free(cnt_ptr);
                cnt_ptr = LIST_FIRST(&pf->flow_counters);
        }
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
                         const struct rte_flow_action *action,
                         struct hns3_fdir_rule *rule,
                         struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_queue *queue;
        struct hns3_hw *hw = &hns->hw;

        queue = (const struct rte_flow_action_queue *)action->conf;
        if (queue->index >= hw->data->nb_rx_queues) {
                hns3_err(hw, "queue ID (%u) is greater than the number of "
                          "available queues (%u) in the driver.",
                          queue->index, hw->data->nb_rx_queues);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          action, "Invalid queue ID in PF");
        }

        rule->queue_id = queue->index;
        rule->nb_queues = 1;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}

static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct hns3_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_rss *conf = action->conf;
        struct hns3_hw *hw = &hns->hw;
        uint16_t idx;

        if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
                return rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                        "Configuring a queue region is not supported!");

        if ((!rte_is_power_of_2(conf->queue_num)) ||
                conf->queue_num > hw->rss_size_max ||
                conf->queue[0] >= hw->data->nb_rx_queues ||
                conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
                return rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                        "Invalid start queue ID or queue num! The start queue "
                        "ID must be valid, and the queue num must be a power "
                        "of 2 and <= rss_size_max.");
        }

        for (idx = 1; idx < conf->queue_num; idx++) {
                if (conf->queue[idx] != conf->queue[idx - 1] + 1)
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                                "Invalid queue ID sequence! The queue IDs "
                                "must increase continuously.");
        }

        rule->queue_id = conf->queue[0];
        rule->nb_queues = conf->queue_num;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}
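
/*
 * A minimal sketch of an action conf that satisfies the checks above
 * (contiguous queue IDs, power-of-2 count within rss_size_max); the queue
 * IDs are illustrative:
 *
 *	uint16_t queues[4] = { 8, 9, 10, 11 };
 *	struct rte_flow_action_rss conf = {
 *		.queue_num = 4,
 *		.queue = queues,
 *	};
 */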

/*
 * Parse the actions structure from the provided actions.
 * The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
                    const struct rte_flow_action actions[],
                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_count *act_count;
        const struct rte_flow_action_mark *mark;
        struct hns3_pf *pf = &hns->pf;
        uint32_t counter_num;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = hns3_handle_action_queue(dev, actions, rule,
                                                       error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule->action = HNS3_FD_ACTION_DROP_PACKET;
                        break;
                /*
                 * Here the RSS action's real effect is a queue region.
                 * Queue region is implemented by FDIR + RSS in hns3 hardware:
                 * the FDIR action selects one queue region (start_queue_id
                 * and queue_num), and RSS then spreads packets across that
                 * region by the RSS algorithm.
                 */
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = hns3_handle_action_queue_region(dev, actions,
                                                              rule, error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark =
                            (const struct rte_flow_action_mark *)actions->conf;
                        if (mark->id >= HNS3_MAX_FILTER_ID)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid Mark ID");
                        rule->fd_id = mark->id;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        rule->fd_id = HNS3_MAX_FILTER_ID;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        act_count =
                            (const struct rte_flow_action_count *)actions->conf;
                        counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
                        if (act_count->id >= counter_num)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid counter id");
                        rule->act_cnt = *act_count;
                        rule->flags |= HNS3_RULE_FLAG_COUNTER;
                        break;
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "Unsupported action");
                }
        }

        return 0;
}

static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
        if (!attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          attr, "Ingress can't be zero");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          attr, "Not support egress");
        if (attr->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          attr, "No support for transfer");
        if (attr->priority)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          attr, "Not support priority");
        if (attr->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          attr, "Not support group");
        return 0;
}

static int
hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error __rte_unused)
{
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                eth_mask = item->mask;
                if (eth_mask->type) {
                        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
                        rule->key_conf.mask.ether_type =
                            rte_be_to_cpu_16(eth_mask->type);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->src)) {
                        hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
                        memcpy(rule->key_conf.mask.src_mac,
                               eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
                        hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
                        memcpy(rule->key_conf.mask.dst_mac,
                               eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
        }

        eth_spec = item->spec;
        rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
        memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;

        rule->key_conf.vlan_num++;
        if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Vlan_num is more than 2");

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                vlan_mask = item->mask;
                if (vlan_mask->tci) {
                        if (rule->key_conf.vlan_num == 1) {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
                                             1);
                                rule->key_conf.mask.vlan_tag1 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        } else {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
                                             1);
                                rule->key_conf.mask.vlan_tag2 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        }
                }
        }

        vlan_spec = item->spec;
        if (rule->key_conf.vlan_num == 1)
                rule->key_conf.spec.vlan_tag1 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        else
                rule->key_conf.spec.vlan_tag2 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        return 0;
}

static bool
hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
{
        if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum)
                return false;

        return true;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv4_mask = item->mask;
                if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip,tos,proto in IPV4");
                }

                if (ipv4_mask->hdr.src_addr) {
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                        rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
                }

                if (ipv4_mask->hdr.dst_addr) {
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                        rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
                }

                if (ipv4_mask->hdr.type_of_service) {
                        hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
                        rule->key_conf.mask.ip_tos =
                            ipv4_mask->hdr.type_of_service;
                }

                if (ipv4_mask->hdr.next_proto_id) {
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
                        rule->key_conf.mask.ip_proto =
                            ipv4_mask->hdr.next_proto_id;
                }
        }

        ipv4_spec = item->spec;
        rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
        rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
        rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
        rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
        return 0;
}
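
/*
 * Example item accepted by hns3_parse_ipv4() (illustrative values, not
 * driver code): only the src/dst addresses, TOS and next protocol may be
 * masked; other header fields are rejected by the check above.
 *
 *	struct rte_flow_item_ipv4 spec = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 1)),
 *		.hdr.next_proto_id = IPPROTO_TCP,
 *	};
 *	struct rte_flow_item_ipv4 mask = {
 *		.hdr.src_addr = RTE_BE32(UINT32_MAX),
 *		.hdr.next_proto_id = 0xFF,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 */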

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv6_mask = item->mask;
                if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.hop_limits) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip,proto in IPV6");
                }
                net_addr_to_host(rule->key_conf.mask.src_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
                                 IP_ADDR_LEN);
                net_addr_to_host(rule->key_conf.mask.dst_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
                                 IP_ADDR_LEN);
                rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
                if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                if (ipv6_mask->hdr.proto)
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        }

        ipv6_spec = item->spec;
        net_addr_to_host(rule->key_conf.spec.src_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.src_addr,
                         IP_ADDR_LEN);
        net_addr_to_host(rule->key_conf.spec.dst_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
                         IP_ADDR_LEN);
        rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

        return 0;
}

static bool
hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
{
        if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp)
                return false;

        return true;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_TCP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                tcp_mask = item->mask;
                if (!hns3_check_tcp_mask_supported(tcp_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in TCP");
                }

                if (tcp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.src_port);
                }
                if (tcp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
                }
        }

        tcp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_UDP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                udp_mask = item->mask;
                if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in UDP");
                }
                if (udp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(udp_mask->hdr.src_port);
                }
                if (udp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(udp_mask->hdr.dst_port);
                }
        }

        udp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                sctp_mask = item->mask;
                if (sctp_mask->hdr.cksum)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in SCTP");
                if (sctp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.src_port);
                }
                if (sctp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
                }
                if (sctp_mask->hdr.tag) {
                        hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
                        rule->key_conf.mask.sctp_tag =
                            rte_be_to_cpu_32(sctp_mask->hdr.tag);
                }
        }

        sctp_spec = item->spec;
        rule->key_conf.spec.src_port =
            rte_be_to_cpu_16(sctp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port =
            rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
        rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

        return 0;
}

/*
 * Check items before the tunnel item, save inner configs to the outer
 * configs, and clear the inner configs.
 * The key consists of two parts: meta data and tuple keys.
 * Meta data uses 15 bits, including vlan_num (2 bits), des_port (12 bits)
 * and tunnel packet (1 bit).
 * Tuple keys use 384 bits, including ot_dst-mac (48 bits), ot_dst-port
 * (16 bits), ot_tun_vni (24 bits), ot_flow_id (8 bits), src-mac (48 bits),
 * dst-mac (48 bits), src-ip (32/128 bits), dst-ip (32/128 bits), src-port
 * (16 bits), dst-port (16 bits), tos (8 bits), ether-proto (16 bits),
 * ip-proto (8 bits), vlan_tag1 (16 bits), vlan_tag2 (16 bits) and
 * sctp-tag (32 bits).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
                   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        /* check eth config */
        if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer eth mac is unsupported");
        if (rule->input_set & BIT(INNER_ETH_TYPE)) {
                hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
                rule->key_conf.spec.outer_ether_type =
                    rule->key_conf.spec.ether_type;
                rule->key_conf.mask.outer_ether_type =
                    rule->key_conf.mask.ether_type;
                hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
                rule->key_conf.spec.ether_type = 0;
                rule->key_conf.mask.ether_type = 0;
        }

        /* check vlan config */
        if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Outer vlan tags are unsupported");

        /* clear vlan_num for inner vlan select */
        rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
        rule->key_conf.vlan_num = 0;

        /* check L3 config */
        if (rule->input_set &
            (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer ip is unsupported");
        if (rule->input_set & BIT(INNER_IP_PROTO)) {
                hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
                rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
                rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
                hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
                rule->key_conf.spec.ip_proto = 0;
                rule->key_conf.mask.ip_proto = 0;
        }

        /* check L4 config */
        if (rule->input_set & BIT(INNER_SCTP_TAG))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Outer sctp tag is unsupported");

        if (rule->input_set & BIT(INNER_SRC_PORT)) {
                hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
                rule->key_conf.spec.outer_src_port =
                    rule->key_conf.spec.src_port;
                rule->key_conf.mask.outer_src_port =
                    rule->key_conf.mask.src_port;
                hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
                rule->key_conf.spec.src_port = 0;
                rule->key_conf.mask.src_port = 0;
        }
        if (rule->input_set & BIT(INNER_DST_PORT)) {
                hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
                rule->key_conf.spec.dst_port = 0;
                rule->key_conf.mask.dst_port = 0;
        }
        return 0;
}
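
/*
 * Illustrative tunnel pattern (not part of the driver): for
 * ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP, the items before VXLAN are
 * first parsed into the inner fields; hns3_handle_tunnel() then moves the
 * ether type, IP proto and L4 ports into the outer key fields and clears
 * the inner ones, so the items after VXLAN can fill the inner fields again.
 * A testpmd-style sketch of such a rule (port and values are placeholders):
 *
 *	flow create 0 ingress pattern eth / ipv4 / udp /
 *		vxlan vni is 100 / eth / ipv4 / tcp dst is 80 / end
 *		actions queue index 2 / end
 */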

static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
        else
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        vxlan_mask = item->mask;
        vxlan_spec = item->spec;

        if (vxlan_mask->flags)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Flags are not supported in VxLAN");

        /* VNI must be totally masked or not. */
        if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in VxLAN");
        if (vxlan_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
                   VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;

        hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
        rule->key_conf.spec.outer_proto = IPPROTO_GRE;
        rule->key_conf.mask.outer_proto = IPPROTO_MASK;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
        rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        nvgre_mask = item->mask;
        nvgre_spec = item->spec;

        if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in NVGRE");

        /* TNI must be totally masked or not. */
        if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "TNI must be totally masked or not in NVGRE");

        if (nvgre_mask->tni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
                   VNI_OR_TNI_LEN);

        if (nvgre_mask->flow_id) {
                hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
                rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
        }
        rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
        return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        geneve_mask = item->mask;
        geneve_spec = item->spec;

        if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in GENEVE");
        /* VNI must be totally masked or not. */
        if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in GENEVE");
        if (geneve_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
                   VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");
        else if (item->spec && (item->mask == NULL))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Tunnel packets must configure "
                                          "with mask");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                ret = hns3_parse_vxlan(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                ret = hns3_parse_nvgre(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_GENEVE:
                ret = hns3_parse_geneve(item, rule, error);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported tunnel type!");
        }
        if (ret)
                return ret;
        return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct items_step_mngr *step_mngr,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                ret = hns3_parse_eth(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                ret = hns3_parse_vlan(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                ret = hns3_parse_ipv4(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                ret = hns3_parse_ipv6(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                ret = hns3_parse_tcp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                ret = hns3_parse_udp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
                ret = hns3_parse_sctp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported normal type!");
        }

        return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
                   struct items_step_mngr step_mngr,
                   struct rte_flow_error *error)
{
        int i;

        if (item->last)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
                                          "Not supported last point for range");

        for (i = 0; i < step_mngr.count; i++) {
                if (item->type == step_mngr.items[i])
                        break;
        }

        if (i == step_mngr.count) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Invalid or missing item");
        }
        return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
        if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
            type == RTE_FLOW_ITEM_TYPE_VXLAN ||
            type == RTE_FLOW_ITEM_TYPE_NVGRE ||
            type == RTE_FLOW_ITEM_TYPE_GENEVE)
                return true;
        return false;
}

/*
 * Parse the flow director rule.
 * The supported PATTERN:
 *   case: non-tunnel packet:
 *     ETH : src-mac, dst-mac, ethertype
 *     VLAN: tag1, tag2
 *     IPv4: src-ip, dst-ip, tos, proto
 *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
 *     UDP : src-port, dst-port
 *     TCP : src-port, dst-port
 *     SCTP: src-port, dst-port, tag
 *   case: tunnel packet:
 *     OUTER-ETH: ethertype
 *     OUTER-L3 : proto
 *     OUTER-L4 : src-port, dst-port
 *     TUNNEL   : vni, flow-id (only valid for NVGRE)
 *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
 * The supported ACTION:
 *    QUEUE
 *    DROP
 *    COUNT
 *    MARK: the id range [0, 4094]
 *    FLAG
 *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct hns3_fdir_rule *rule,
                       struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_item *item;
        struct items_step_mngr step_mngr;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir not supported in VF");

        step_mngr.items = first_items;
        step_mngr.count = RTE_DIM(first_items);
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                ret = hns3_validate_item(item, step_mngr, error);
                if (ret)
                        return ret;

                if (is_tunnel_packet(item->type)) {
                        ret = hns3_parse_tunnel(item, rule, error);
                        if (ret)
                                return ret;
                        step_mngr.items = tunnel_next_items;
                        step_mngr.count = RTE_DIM(tunnel_next_items);
                } else {
                        ret = hns3_parse_normal(item, rule, &step_mngr, error);
                        if (ret)
                                return ret;
                }
        }

        return hns3_handle_actions(dev, actions, rule, error);
}
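
/*
 * End-to-end usage sketch (assumption: a generic rte_flow application;
 * port_id is a placeholder). A rule matching TCP dst port 80 over IPv4 and
 * steered to queue 3 exercises the parser above via rte_flow_create():
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = { .hdr.dst_port = RTE_BE16(80) };
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */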

static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;

        fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        while (fdir_rule_ptr) {
                TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
                rte_free(fdir_rule_ptr);
                fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        }

        rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        while (rss_filter_ptr) {
                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                rte_free(rss_filter_ptr);
                rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        }

        flow_node = TAILQ_FIRST(&hw->flow_list);
        while (flow_node) {
                TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
                rte_free(flow_node->flow);
                rte_free(flow_node);
                flow_node = TAILQ_FIRST(&hw->flow_list);
        }
}

static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
                     const struct rte_flow_action_rss *with)
{
        bool func_is_same;

        /*
         * When the user flushes all RSS rules, the RSS func is set to the
         * invalid value RTE_ETH_HASH_FUNCTION_MAX. Any flow created after a
         * flush therefore has a valid RSS func that differs from the
         * recorded one. Otherwise, when the user creates an RSS action whose
         * func is RTE_ETH_HASH_FUNCTION_DEFAULT, the func is considered the
         * same between consecutive RSS flows.
         */
        if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
                func_is_same = false;
        else
                func_is_same = with->func ? (comp->func == with->func) : true;

        return (func_is_same &&
                comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
                comp->level == with->level && comp->key_len == with->key_len &&
                comp->queue_num == with->queue_num &&
                !memcmp(comp->key, with->key, with->key_len) &&
                !memcmp(comp->queue, with->queue,
                        sizeof(*with->queue) * with->queue_num));
}

static int
hns3_rss_conf_copy(struct hns3_rss_conf *out,
                   const struct rte_flow_action_rss *in)
{
        if (in->key_len > RTE_DIM(out->key) ||
            in->queue_num > RTE_DIM(out->queue))
                return -EINVAL;
        if (in->key == NULL && in->key_len)
                return -EINVAL;
        out->conf = (struct rte_flow_action_rss) {
                .func = in->func,
                .level = in->level,
                .types = in->types,
                .key_len = in->key_len,
                .queue_num = in->queue_num,
        };
        out->conf.queue = memcpy(out->queue, in->queue,
                                sizeof(*in->queue) * in->queue_num);
        if (in->key)
                out->conf.key = memcpy(out->key, in->key, in->key_len);

        return 0;
}
1288
1289 static bool
1290 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1291                                const struct rte_flow_action_rss *rss)
1292 {
1293         /*
1294          * For IP packets, the src/dst port fields cannot be used in the RSS
1295          * hash for the following packet types:
1296          * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1297          * In addition, on Kunpeng920 the NIC hardware cannot use the src/dst
1298          * port fields in the RSS hash for the IPV6 SCTP packet type. However,
1299          * Kunpeng930 and later Kunpeng series can use the src/dst port fields
1300          * in the RSS hash for IPv6 SCTP packets.
1301          */
1302         if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
1303             (rss->types & RTE_ETH_RSS_IP ||
1304             (!hw->rss_info.ipv6_sctp_offload_supported &&
1305             rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
1306                 return false;
1307
1308         return true;
1309 }
1310
1311 /*
1312  * This function is used to validate the RSS action configuration.
1313  */
1314 static int
1315 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1316                       const struct rte_flow_action *actions,
1317                       struct rte_flow_error *error)
1318 {
1319         struct hns3_adapter *hns = dev->data->dev_private;
1320         struct hns3_hw *hw = &hns->hw;
1321         struct hns3_rss_conf *rss_conf = &hw->rss_info;
1322         const struct rte_flow_action_rss *rss;
1323         const struct rte_flow_action *act;
1324         uint32_t act_index = 0;
1325         uint16_t n;
1326
1327         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1328         rss = act->conf;
1329
1330         if (rss == NULL) {
1331                 return rte_flow_error_set(error, EINVAL,
1332                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1333                                           act, "RSS action conf is NULL");
1334         }
1335
1336         if (rss->queue_num > RTE_DIM(rss_conf->queue))
1337                 return rte_flow_error_set(error, ENOTSUP,
1338                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1339                                           "queue number configured exceeds "
1340                                           "the queue buffer size supported by the driver");
1341
1342         for (n = 0; n < rss->queue_num; n++) {
1343                 if (rss->queue[n] < hw->alloc_rss_size)
1344                         continue;
1345                 return rte_flow_error_set(error, EINVAL,
1346                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1347                                           "queue id must be less than queue number allocated to a TC");
1348         }
1349
1350         if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1351                 return rte_flow_error_set(error, EINVAL,
1352                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1353                                           act,
1354                                           "Flow types are unsupported by "
1355                                           "hns3's RSS");
1356         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1357                 return rte_flow_error_set(error, ENOTSUP,
1358                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1359                                           "RSS hash func is not supported");
1360         if (rss->level)
1361                 return rte_flow_error_set(error, ENOTSUP,
1362                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1363                                           "a nonzero RSS encapsulation level is not supported");
1364         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1365                 return rte_flow_error_set(error, ENOTSUP,
1366                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1367                                           "RSS hash key must be exactly 40 bytes");
1368
1369         if (!hns3_rss_input_tuple_supported(hw, rss))
1370                 return rte_flow_error_set(error, EINVAL,
1371                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1372                                           &rss->types,
1373                                           "input RSS types are not supported");
1374
1375         act_index++;
1376
1377         /* Check if the next not void action is END */
1378         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1379         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1380                 memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1381                 return rte_flow_error_set(error, EINVAL,
1382                                           RTE_FLOW_ERROR_TYPE_ACTION,
1383                                           act, "Not supported action.");
1384         }
1385
1386         return 0;
1387 }
1388
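/* Quiesce RSS: point the redirection table at queue 0 and clear the types. */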
1389 static int
1390 hns3_disable_rss(struct hns3_hw *hw)
1391 {
1392         int ret;
1393
1394         /* Redirect all entries of the redirection table to queue 0 */
1395         ret = hns3_rss_reset_indir_table(hw);
1396         if (ret)
1397                 return ret;
1398
1399         /* Disable RSS */
1400         hw->rss_info.conf.types = 0;
1401         hw->rss_dis_flag = true;
1402
1403         return 0;
1404 }
1405
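/*
 * Fall back to the driver's default hash key when the user supplies no
 * key, or one shorter than HNS3_RSS_KEY_SIZE.
 */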
1406 static void
1407 hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1408 {
1409         if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1410                 hns3_warn(hw, "RSS hash key is missing or too short, the default key will be set");
1411                 rss_conf->key = hns3_hash_key;
1412                 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1413         }
1414 }
1415
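/*
 * Map a generic rte_eth hash function onto the hns3 hardware hash
 * algorithm. RTE_ETH_HASH_FUNCTION_DEFAULT keeps the currently
 * configured algorithm.
 */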
1416 static int
1417 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1418                          uint8_t *hash_algo)
1419 {
1420         enum rte_eth_hash_function algo_func = *func;
1421         switch (algo_func) {
1422         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1423                 /* Keep *hash_algo as what it used to be */
1424                 algo_func = hw->rss_info.conf.func;
1425                 break;
1426         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1427                 *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1428                 break;
1429         case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1430                 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1431                 break;
1432         case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1433                 *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1434                 break;
1435         default:
1436                 hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1437                          algo_func);
1438                 return -EINVAL;
1439         }
1440         *func = algo_func;
1441
1442         return 0;
1443 }
1444
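/*
 * Program the hash key, hash algorithm and input tuples into hardware
 * according to the user's RSS action.
 */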
1445 static int
1446 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1447 {
1448         struct hns3_rss_tuple_cfg *tuple;
1449         int ret;
1450
1451         hns3_parse_rss_key(hw, rss_config);
1452
1453         ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1454                                        &hw->rss_info.hash_algo);
1455         if (ret)
1456                 return ret;
1457
1458         ret = hns3_rss_set_algo_key(hw, rss_config->key);
1459         if (ret)
1460                 return ret;
1461
1462         hw->rss_info.conf.func = rss_config->func;
1463
1464         tuple = &hw->rss_info.rss_tuple_sets;
1465         ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1466         if (ret)
1467                 hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1468
1469         return ret;
1470 }
1471
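/*
 * Rebuild the RSS redirection table from the queue list of the RSS
 * action, spreading the first 'num' queues round-robin across all
 * hardware table entries.
 */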
1472 static int
1473 hns3_update_indir_table(struct rte_eth_dev *dev,
1474                         const struct rte_flow_action_rss *conf, uint16_t num)
1475 {
1476         struct hns3_adapter *hns = dev->data->dev_private;
1477         struct hns3_hw *hw = &hns->hw;
1478         uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1479         uint16_t j;
1480         uint32_t i;
1481
1482         /* Fill in redirection table */
1483         memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1484                sizeof(hw->rss_info.rss_indirection_tbl));
1485         for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1486                 j %= num;
1487                 if (conf->queue[j] >= hw->alloc_rss_size) {
1488                         hns3_err(hw, "queue id(%u) set to redirection table "
1489                                  "exceeds queue number(%u) allocated to a TC.",
1490                                  conf->queue[j], hw->alloc_rss_size);
1491                         return -EINVAL;
1492                 }
1493                 indir_tbl[i] = conf->queue[j];
1494         }
1495
1496         return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1497 }
1498
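/*
 * Apply (add == true) or withdraw (add == false) an RSS filter in
 * hardware. Withdrawing resets the redirection table and marks the
 * cached RSS func invalid so that the rule is not restored later.
 */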
1499 static int
1500 hns3_config_rss_filter(struct rte_eth_dev *dev,
1501                        const struct hns3_rss_conf *conf, bool add)
1502 {
1503         struct hns3_adapter *hns = dev->data->dev_private;
1504         struct hns3_rss_conf_ele *rss_filter_ptr;
1505         struct hns3_hw *hw = &hns->hw;
1506         struct hns3_rss_conf *rss_info;
1507         uint64_t flow_types;
1508         uint16_t num;
1509         int ret;
1510
1511         struct rte_flow_action_rss rss_flow_conf = {
1512                 .func = conf->conf.func,
1513                 .level = conf->conf.level,
1514                 .types = conf->conf.types,
1515                 .key_len = conf->conf.key_len,
1516                 .queue_num = conf->conf.queue_num,
1517                 .key = conf->conf.key_len ?
1518                     (void *)(uintptr_t)conf->conf.key : NULL,
1519                 .queue = conf->conf.queue,
1520         };
1521
1522         /* Filter the unsupported flow types */
1523         flow_types = conf->conf.types ?
1524                      rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1525                      hw->rss_info.conf.types;
1526         if (flow_types != rss_flow_conf.types)
1527                 hns3_warn(hw, "modified RSS types based on hardware support, "
1528                               "requested:0x%" PRIx64 " configured:0x%" PRIx64,
1529                           rss_flow_conf.types, flow_types);
1530         /* Update the useful flow types */
1531         rss_flow_conf.types = flow_types;
1532
1533         rss_info = &hw->rss_info;
1534         if (!add) {
1535                 if (!conf->valid)
1536                         return 0;
1537
1538                 ret = hns3_disable_rss(hw);
1539                 if (ret) {
1540                         hns3_err(hw, "RSS disable failed(%d)", ret);
1541                         return ret;
1542                 }
1543
1544                 if (rss_flow_conf.queue_num) {
1545                         /*
1546                          * Since the queue contents have been reset to 0,
1547                          * rss_info->conf.queue should be set to NULL.
1548                          */
1549                         rss_info->conf.queue = NULL;
1550                         rss_info->conf.queue_num = 0;
1551                 }
1552
1553                 /* Set the RSS func invalid after the flush */
1554                 rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1555                 return 0;
1556         }
1557
1558         /* Set rx queues to use */
1559         num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1560         if (rss_flow_conf.queue_num > num)
1561                 hns3_warn(hw, "Configured queue number %u exceeds the supported range and is truncated",
1562                           rss_flow_conf.queue_num);
1563         hns3_info(hw, "A maximum of %u contiguous PF queues are configured", num);
1564
1565         rte_spinlock_lock(&hw->lock);
1566         if (num) {
1567                 ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1568                 if (ret)
1569                         goto rss_config_err;
1570         }
1571
1572         /* Set hash algorithm and flow types by the user's config */
1573         ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1574         if (ret)
1575                 goto rss_config_err;
1576
1577         ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1578         if (ret) {
1579                 hns3_err(hw, "RSS config init fail(%d)", ret);
1580                 goto rss_config_err;
1581         }
1582
1583         /*
1584          * When a new RSS rule is created, the old rule is superseded and
1585          * marked invalid.
1586          */
1587         TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
1588                 rss_filter_ptr->filter_info.valid = false;
1589
1590 rss_config_err:
1591         rte_spinlock_unlock(&hw->lock);
1592
1593         return ret;
1594 }
1595
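/*
 * Withdraw all RSS filters from hardware and free their list entries,
 * returning -EIO if any individual removal fails.
 */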
1596 static int
1597 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1598 {
1599         struct hns3_adapter *hns = dev->data->dev_private;
1600         struct hns3_rss_conf_ele *rss_filter_ptr;
1601         struct hns3_hw *hw = &hns->hw;
1602         int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1603         int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1604         int ret = 0;
1605
1606         rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1607         while (rss_filter_ptr) {
1608                 TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1609                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1610                                              false);
1611                 if (ret)
1612                         rss_rule_fail_cnt++;
1613                 else
1614                         rss_rule_succ_cnt++;
1615                 rte_free(rss_filter_ptr);
1616                 rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1617         }
1618
1619         if (rss_rule_fail_cnt) {
1620                 hns3_err(hw, "failed to delete all RSS filters, success num = %d "
1621                              "fail num = %d", rss_rule_succ_cnt,
1622                              rss_rule_fail_cnt);
1623                 ret = -EIO;
1624         }
1625
1626         return ret;
1627 }
1628
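/*
 * Re-program the cached RSS rule into hardware, typically after a
 * reset has cleared the hardware configuration.
 */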
1629 int
1630 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1631 {
1632         struct hns3_adapter *hns = dev->data->dev_private;
1633         struct hns3_hw *hw = &hns->hw;
1634
1635         /* When the user has flushed all rules, there is no RSS rule to restore */
1636         if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1637                 return 0;
1638
1639         return hns3_config_rss_filter(dev, &hw->rss_info, true);
1640 }
1641
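/*
 * Validate that the requested RSS configuration is not a duplicate of
 * the current one, then program the filter.
 */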
1642 static int
1643 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1644                     const struct hns3_rss_conf *conf, bool add)
1645 {
1646         struct hns3_adapter *hns = dev->data->dev_private;
1647         struct hns3_hw *hw = &hns->hw;
1648         bool ret;
1649
1650         ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1651         if (ret) {
1652                 hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
1653                 return -EINVAL;
1654         }
1655
1656         return hns3_config_rss_filter(dev, conf, add);
1657 }
1658
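/* Validate that the attribute, pattern and action arguments are non-NULL. */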
1659 static int
1660 hns3_flow_args_check(const struct rte_flow_attr *attr,
1661                      const struct rte_flow_item pattern[],
1662                      const struct rte_flow_action actions[],
1663                      struct rte_flow_error *error)
1664 {
1665         if (pattern == NULL)
1666                 return rte_flow_error_set(error, EINVAL,
1667                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1668                                           NULL, "NULL pattern.");
1669
1670         if (actions == NULL)
1671                 return rte_flow_error_set(error, EINVAL,
1672                                           RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1673                                           NULL, "NULL action.");
1674
1675         if (attr == NULL)
1676                 return rte_flow_error_set(error, EINVAL,
1677                                           RTE_FLOW_ERROR_TYPE_ATTR,
1678                                           NULL, "NULL attribute.");
1679
1680         return hns3_check_attr(attr, error);
1681 }
1682
1683 /*
1684  * Check whether the flow rule is supported by hns3.
1685  * Only the format is checked; there is no guarantee that the rule can be
1686  * programmed into the HW, since there may not be enough room for it.
1687  */
1688 static int
1689 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1690                    const struct rte_flow_item pattern[],
1691                    const struct rte_flow_action actions[],
1692                    struct rte_flow_error *error)
1693 {
1694         struct hns3_fdir_rule fdir_rule;
1695         int ret;
1696
1697         ret = hns3_flow_args_check(attr, pattern, actions, error);
1698         if (ret)
1699                 return ret;
1700
1701         if (hns3_find_rss_general_action(pattern, actions))
1702                 return hns3_parse_rss_filter(dev, actions, error);
1703
1704         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1705         return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1706 }
1707
1708 /*
1709  * Create a flow rule on hns3.
1710  * Theoretically one rule can match more than one filter.
1711  * We let it use the first filter it hits,
1712  * so the sequence matters.
1713  */
1714 static struct rte_flow *
1715 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1716                  const struct rte_flow_item pattern[],
1717                  const struct rte_flow_action actions[],
1718                  struct rte_flow_error *error)
1719 {
1720         struct hns3_adapter *hns = dev->data->dev_private;
1721         struct hns3_hw *hw = &hns->hw;
1722         const struct hns3_rss_conf *rss_conf;
1723         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1724         struct hns3_rss_conf_ele *rss_filter_ptr;
1725         struct hns3_flow_mem *flow_node;
1726         const struct rte_flow_action *act;
1727         struct rte_flow *flow;
1728         struct hns3_fdir_rule fdir_rule;
1729         int ret;
1730
1731         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1732         if (ret)
1733                 return NULL;
1734
1735         flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1736         if (flow == NULL) {
1737                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1738                                    NULL, "Failed to allocate flow memory");
1739                 return NULL;
1740         }
1741         flow_node = rte_zmalloc("hns3 flow node",
1742                                 sizeof(struct hns3_flow_mem), 0);
1743         if (flow_node == NULL) {
1744                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1745                                    NULL, "Failed to allocate flow list memory");
1746                 rte_free(flow);
1747                 return NULL;
1748         }
1749
1750         flow_node->flow = flow;
1751         TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
1752
1753         act = hns3_find_rss_general_action(pattern, actions);
1754         if (act) {
1755                 rss_conf = act->conf;
1756
1757                 ret = hns3_flow_parse_rss(dev, rss_conf, true);
1758                 if (ret)
1759                         goto err;
1760
1761                 rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1762                                              sizeof(struct hns3_rss_conf_ele),
1763                                              0);
1764                 if (rss_filter_ptr == NULL) {
1765                         hns3_err(hw,
1766                                  "Failed to allocate hns3_rss_filter memory");
1767                         ret = -ENOMEM;
1768                         goto err;
1769                 }
1770                 hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1771                                    &rss_conf->conf);
1772                 rss_filter_ptr->filter_info.valid = true;
1773                 TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
1774
1775                 flow->rule = rss_filter_ptr;
1776                 flow->filter_type = RTE_ETH_FILTER_HASH;
1777                 return flow;
1778         }
1779
1780         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1781         ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1782         if (ret)
1783                 goto out;
1784
1785         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1786                 ret = hns3_counter_new(dev, 0, fdir_rule.act_cnt.id, error);
1787                 if (ret)
1788                         goto out;
1789
1790                 flow->counter_id = fdir_rule.act_cnt.id;
1791         }
1792
1793         fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1794                                     sizeof(struct hns3_fdir_rule_ele),
1795                                     0);
1796         if (fdir_rule_ptr == NULL) {
1797                 hns3_err(hw, "failed to allocate fdir_rule memory.");
1798                 ret = -ENOMEM;
1799                 goto err_fdir;
1800         }
1801
1802         ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1803         if (!ret) {
1804                 memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1805                         sizeof(struct hns3_fdir_rule));
1806                 TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1807                 flow->rule = fdir_rule_ptr;
1808                 flow->filter_type = RTE_ETH_FILTER_FDIR;
1809
1810                 return flow;
1811         }
1812
1813         rte_free(fdir_rule_ptr);
1814 err_fdir:
1815         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1816                 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1817 err:
1818         rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1819                            "Failed to create flow");
1820 out:
1821         TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1822         rte_free(flow_node);
1823         rte_free(flow);
1824         return NULL;
1825 }
1826
1827 /* Destroy a flow rule on hns3. */
1828 static int
1829 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1830                   struct rte_flow_error *error)
1831 {
1832         struct hns3_adapter *hns = dev->data->dev_private;
1833         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1834         struct hns3_rss_conf_ele *rss_filter_ptr;
1835         struct hns3_flow_mem *flow_node;
1836         enum rte_filter_type filter_type;
1837         struct hns3_fdir_rule fdir_rule;
1838         struct hns3_hw *hw = &hns->hw;
1839         int ret;
1840
1841         if (flow == NULL)
1842                 return rte_flow_error_set(error, EINVAL,
1843                                           RTE_FLOW_ERROR_TYPE_HANDLE,
1844                                           flow, "Flow is NULL");
1845
1846         filter_type = flow->filter_type;
1847         switch (filter_type) {
1848         case RTE_ETH_FILTER_FDIR:
1849                 fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1850                 memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1851                            sizeof(struct hns3_fdir_rule));
1852
1853                 ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1854                 if (ret)
1855                         return rte_flow_error_set(error, EIO,
1856                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1857                                                   flow,
1858                                                   "Destroy FDIR fail. Try again");
1859                 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1860                         hns3_counter_release(dev, fdir_rule.act_cnt.id);
1861                 TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1862                 rte_free(fdir_rule_ptr);
1863                 fdir_rule_ptr = NULL;
1864                 break;
1865         case RTE_ETH_FILTER_HASH:
1866                 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1867                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1868                                              false);
1869                 if (ret)
1870                         return rte_flow_error_set(error, EIO,
1871                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1872                                                   flow,
1873                                                   "Destroy RSS fail. Try again");
1874                 TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1875                 rte_free(rss_filter_ptr);
1876                 rss_filter_ptr = NULL;
1877                 break;
1878         default:
1879                 return rte_flow_error_set(error, EINVAL,
1880                                           RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1881                                           "Unsupported filter type");
1882         }
1883
1884         TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
1885                 if (flow_node->flow == flow) {
1886                         TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1887                         rte_free(flow_node);
1888                         flow_node = NULL;
1889                         break;
1890                 }
1891         }
1892         rte_free(flow);
1893         flow = NULL;
1894
1895         return 0;
1896 }
1897
1898 /* Destroy all flow rules associated with a port on hns3. */
1899 static int
1900 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1901 {
1902         struct hns3_adapter *hns = dev->data->dev_private;
1903         int ret;
1904
1905         /* FDIR is available only in PF driver */
1906         if (!hns->is_vf) {
1907                 ret = hns3_clear_all_fdir_filter(hns);
1908                 if (ret) {
1909                         rte_flow_error_set(error, ret,
1910                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1911                                            NULL, "Failed to flush rule");
1912                         return ret;
1913                 }
1914                 hns3_counter_flush(dev);
1915         }
1916
1917         ret = hns3_clear_rss_filter(dev);
1918         if (ret) {
1919                 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1920                                    NULL, "Failed to flush rss filter");
1921                 return ret;
1922         }
1923
1924         hns3_filterlist_flush(dev);
1925
1926         return 0;
1927 }
1928
1929 /* Query an existing flow rule. */
1930 static int
1931 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1932                 const struct rte_flow_action *actions, void *data,
1933                 struct rte_flow_error *error)
1934 {
1935         struct rte_flow_action_rss *rss_conf;
1936         struct hns3_rss_conf_ele *rss_rule;
1937         struct rte_flow_query_count *qc;
1938         int ret;
1939
1940         if (!flow->rule)
1941                 return rte_flow_error_set(error, EINVAL,
1942                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
1943
1944         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1945                 switch (actions->type) {
1946                 case RTE_FLOW_ACTION_TYPE_VOID:
1947                         break;
1948                 case RTE_FLOW_ACTION_TYPE_COUNT:
1949                         qc = (struct rte_flow_query_count *)data;
1950                         ret = hns3_counter_query(dev, flow, qc, error);
1951                         if (ret)
1952                                 return ret;
1953                         break;
1954                 case RTE_FLOW_ACTION_TYPE_RSS:
1955                         if (flow->filter_type != RTE_ETH_FILTER_HASH) {
1956                                 return rte_flow_error_set(error, ENOTSUP,
1957                                         RTE_FLOW_ERROR_TYPE_ACTION,
1958                                         actions, "action is not supported");
1959                         }
1960                         rss_conf = (struct rte_flow_action_rss *)data;
1961                         rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
1962                         rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
1963                                    sizeof(struct rte_flow_action_rss));
1964                         break;
1965                 default:
1966                         return rte_flow_error_set(error, ENOTSUP,
1967                                 RTE_FLOW_ERROR_TYPE_ACTION,
1968                                 actions, "action is not supported");
1969                 }
1970         }
1971
1972         return 0;
1973 }
1974
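/*
 * The *_wrap callbacks below serialize all rte_flow operations on the
 * per-device flows_lock mutex, which makes the flow ops safe to call
 * from multiple threads (see RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE).
 */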
1975 static int
1976 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
1977                         const struct rte_flow_attr *attr,
1978                         const struct rte_flow_item pattern[],
1979                         const struct rte_flow_action actions[],
1980                         struct rte_flow_error *error)
1981 {
1982         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1983         int ret;
1984
1985         pthread_mutex_lock(&hw->flows_lock);
1986         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1987         pthread_mutex_unlock(&hw->flows_lock);
1988
1989         return ret;
1990 }
1991
1992 static struct rte_flow *
1993 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1994                       const struct rte_flow_item pattern[],
1995                       const struct rte_flow_action actions[],
1996                       struct rte_flow_error *error)
1997 {
1998         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1999         struct rte_flow *flow;
2000
2001         pthread_mutex_lock(&hw->flows_lock);
2002         flow = hns3_flow_create(dev, attr, pattern, actions, error);
2003         pthread_mutex_unlock(&hw->flows_lock);
2004
2005         return flow;
2006 }
2007
2008 static int
2009 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2010                        struct rte_flow_error *error)
2011 {
2012         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2013         int ret;
2014
2015         pthread_mutex_lock(&hw->flows_lock);
2016         ret = hns3_flow_destroy(dev, flow, error);
2017         pthread_mutex_unlock(&hw->flows_lock);
2018
2019         return ret;
2020 }
2021
2022 static int
2023 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2024 {
2025         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2026         int ret;
2027
2028         pthread_mutex_lock(&hw->flows_lock);
2029         ret = hns3_flow_flush(dev, error);
2030         pthread_mutex_unlock(&hw->flows_lock);
2031
2032         return ret;
2033 }
2034
2035 static int
2036 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2037                      const struct rte_flow_action *actions, void *data,
2038                      struct rte_flow_error *error)
2039 {
2040         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2041         int ret;
2042
2043         pthread_mutex_lock(&hw->flows_lock);
2044         ret = hns3_flow_query(dev, flow, actions, data, error);
2045         pthread_mutex_unlock(&hw->flows_lock);
2046
2047         return ret;
2048 }
2049
2050 static const struct rte_flow_ops hns3_flow_ops = {
2051         .validate = hns3_flow_validate_wrap,
2052         .create = hns3_flow_create_wrap,
2053         .destroy = hns3_flow_destroy_wrap,
2054         .flush = hns3_flow_flush_wrap,
2055         .query = hns3_flow_query_wrap,
2056         .isolate = NULL,
2057 };
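
/*
 * A minimal sketch of how an application reaches the ops above through
 * the generic rte_flow API; hypothetical example code (port_id and the
 * queue list are placeholders), not part of the driver:
 *
 *         struct rte_flow_attr attr = { .ingress = 1 };
 *         struct rte_flow_item pattern[] = {
 *                 { .type = RTE_FLOW_ITEM_TYPE_END },
 *         };
 *         uint16_t queues[] = { 0, 1, 2, 3 };
 *         struct rte_flow_action_rss rss = {
 *                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *                 .types = RTE_ETH_RSS_IP,
 *                 .queue_num = RTE_DIM(queues),
 *                 .queue = queues,
 *         };
 *         struct rte_flow_action actions[] = {
 *                 { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *                 { .type = RTE_FLOW_ACTION_TYPE_END },
 *         };
 *         struct rte_flow_error err;
 *         struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                              actions, &err);
 */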
2058
2059 int
2060 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2061                       const struct rte_flow_ops **ops)
2062 {
2063         struct hns3_hw *hw;
2064
2065         hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2066         if (hw->adapter_state >= HNS3_NIC_CLOSED)
2067                 return -ENODEV;
2068
2069         *ops = &hns3_flow_ops;
2070         return 0;
2071 }
2072
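/*
 * Initialize per-device flow state: a process-shared mutex protecting
 * the flow ops and the three filter lists. Only the primary process
 * performs this setup.
 */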
2073 void
2074 hns3_flow_init(struct rte_eth_dev *dev)
2075 {
2076         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2077         pthread_mutexattr_t attr;
2078
2079         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2080                 return;
2081
2082         pthread_mutexattr_init(&attr);
2083         pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2084         pthread_mutex_init(&hw->flows_lock, &attr);
2085         dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2086
2087         TAILQ_INIT(&hw->flow_fdir_list);
2088         TAILQ_INIT(&hw->flow_rss_list);
2089         TAILQ_INIT(&hw->flow_list);
2090 }
2091
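/* Flush all rules at teardown; only the primary process owns rules. */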
2092 void
2093 hns3_flow_uninit(struct rte_eth_dev *dev)
2094 {
2095         struct rte_flow_error error;
2096         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2097                 hns3_flow_flush_wrap(dev, &error);
2098 }