/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"

/* Default RSS hash key */
static uint8_t hns3_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special Filter id for non-specific packet flagging. Don't change value */
#define HNS3_MAX_FILTER_ID      0x0FFF

#define ETHER_TYPE_MASK         0xFFFF
#define IPPROTO_MASK            0xFF
#define TUNNEL_TYPE_MASK        0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE          0x6558

static enum rte_flow_item_type first_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type L2_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type tunnel_next_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN
};

struct items_step_mngr {
        enum rte_flow_item_type *items;
        int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = rte_be_to_cpu_32(src[i]);
}

/*
 * This function is used to find the RSS general action.
 * 1. As we know, RSS is used to spread packets among several queues. The flow
 *    API provides the struct rte_flow_action_rss; the user can configure its
 *    fields, such as func/level/types/key/queue, to control the RSS function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
 *    rule whose action is an RSS queue region.
 * 3. When the action is RSS, we use the following rule to distinguish:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, indicating
 *            a queue region configuration.
 *    Otherwise: a general RSS action.
 */
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[])
{
        const struct rte_flow_action *act = NULL;
        const struct hns3_rss_conf *rss;
        bool have_eth = false;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        act = actions;
                        break;
                }
        }
        if (!act)
                return NULL;

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        have_eth = true;
                        break;
                }
        }

        rss = act->conf;
        if (have_eth && rss->conf.queue_num) {
                /*
                 * The pattern has ETH and the action's queue_num > 0,
                 * indicating this is a queue region configuration.
                 * Because queue region is implemented by FDIR + RSS in hns3
                 * hardware, it needs to enter the FDIR process, so return
                 * NULL here to avoid entering the RSS process.
                 */
                return NULL;
        }

        return act;
}
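
/*
 * Illustrative examples (testpmd flow syntax; not taken from this file,
 * the queue and type values are assumptions):
 *
 *   Queue region (pattern has ETH and queue_num > 0, so FDIR handles it
 *   and this function returns NULL):
 *     flow create 0 ingress pattern eth / end
 *          actions rss queues 0 1 2 3 end / end
 *
 *   General RSS action (no ETH item, hash types only; this function
 *   returns the RSS action):
 *     flow create 0 ingress pattern end
 *          actions rss types ipv4-tcp end / end
 */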

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt;

        LIST_FOREACH(cnt, &pf->flow_counters, next) {
                if (cnt->id == id)
                        return cnt;
        }
        return NULL;
}

static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
                 struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt) {
                if (!cnt->shared || cnt->shared != shared)
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                cnt,
                                "Counter id is in use, shared flag does not match");
                cnt->ref_cnt++;
                return 0;
        }

        /* Clear the counter by read ops because the counter is read-clear */
        ret = hns3_get_count(hw, id, &value);
        if (ret)
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Clear counter failed!");

        cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
        if (cnt == NULL)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
                                          "Alloc mem for counter failed");
        cnt->id = id;
        cnt->shared = shared;
        cnt->ref_cnt = 1;
        cnt->hits = 0;
        LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
        return 0;
}

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_query_count *qc,
                   struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir is not supported in VF");
        cnt = hns3_counter_lookup(dev, flow->counter_id);
        if (cnt == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Can't find counter id");

        ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
        if (ret) {
                rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Read counter failed.");
                return ret;
        }
        qc->hits_set = 1;
        qc->hits = value;
        qc->bytes_set = 0;
        qc->bytes = 0;

        return 0;
}
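
/*
 * Minimal application-side sketch for reading such a counter (illustrative
 * only; "port_id" and "flow" are assumed to come from the application):
 *
 *   struct rte_flow_query_count qc = { .reset = 0 };
 *   struct rte_flow_action count_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, &count_action, &qc, &err) == 0)
 *           printf("hits: %" PRIu64 "\n", qc.hits);
 *
 * Note this driver reports hit counts only: qc.bytes_set stays 0.
 */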

static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt == NULL) {
                hns3_err(hw, "Can't find available counter to release");
                return -EINVAL;
        }
        cnt->ref_cnt--;
        if (cnt->ref_cnt == 0) {
                LIST_REMOVE(cnt, next);
                rte_free(cnt);
        }
        return 0;
}

static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt_ptr;

        cnt_ptr = LIST_FIRST(&pf->flow_counters);
        while (cnt_ptr) {
                LIST_REMOVE(cnt_ptr, next);
                rte_free(cnt_ptr);
                cnt_ptr = LIST_FIRST(&pf->flow_counters);
        }
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
                         const struct rte_flow_action *action,
                         struct hns3_fdir_rule *rule,
                         struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_queue *queue;
        struct hns3_hw *hw = &hns->hw;

        queue = (const struct rte_flow_action_queue *)action->conf;
        if (queue->index >= hw->used_rx_queues) {
                hns3_err(hw, "queue ID(%u) is greater than number of "
                          "available queues (%u) in driver.",
                          queue->index, hw->used_rx_queues);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          action, "Invalid queue ID in PF");
        }

        rule->queue_id = queue->index;
        rule->nb_queues = 1;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}

static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct hns3_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_rss *conf = action->conf;
        struct hns3_hw *hw = &hns->hw;
        uint16_t idx;

        if (!hns3_dev_fd_queue_region_supported(hw))
                return rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                        "Queue region configuration is not supported!");

        if ((!rte_is_power_of_2(conf->queue_num)) ||
                conf->queue_num > hw->rss_size_max ||
                conf->queue[0] >= hw->used_rx_queues ||
                conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
                return rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                        "Invalid start queue ID and queue num! The start "
                        "queue ID must be valid, and the queue num must be "
                        "a power of 2 and <= rss_size_max.");
        }

        for (idx = 1; idx < conf->queue_num; idx++) {
                if (conf->queue[idx] != conf->queue[idx - 1] + 1)
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                                "Invalid queue ID sequence! The queue IDs "
                                "must increase continuously.");
        }

        rule->queue_id = conf->queue[0];
        rule->nb_queues = conf->queue_num;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}
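
/*
 * Example (illustrative, values are assumptions): with used_rx_queues = 16
 * and rss_size_max >= 4, a valid queue region is a power-of-2 number of
 * contiguous queues inside the Rx queue range, e.g.:
 *
 *   flow create 0 ingress pattern eth / end
 *        actions rss queues 4 5 6 7 end / end
 *
 * Rejected cases: queue_num = 3 (not a power of 2), or queues 4 5 7 8
 * (IDs not continuously increasing).
 */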

/*
 * Parse the actions of the flow rule.
 * The actions are validated as they are converted into the rule.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
                    const struct rte_flow_action actions[],
                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_count *act_count;
        const struct rte_flow_action_mark *mark;
        struct hns3_pf *pf = &hns->pf;
        uint32_t counter_num;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = hns3_handle_action_queue(dev, actions, rule,
                                                       error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule->action = HNS3_FD_ACTION_DROP_PACKET;
                        break;
                /*
                 * Here the RSS action's real meaning is queue region.
                 * Queue region is implemented by FDIR + RSS in hns3 hardware:
                 * the FDIR action selects one queue region (start_queue_id
                 * and queue_num), then RSS spreads packets over the queue
                 * region by the RSS algorithm.
                 */
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = hns3_handle_action_queue_region(dev, actions,
                                                              rule, error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark =
                            (const struct rte_flow_action_mark *)actions->conf;
                        if (mark->id >= HNS3_MAX_FILTER_ID)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid Mark ID");
                        rule->fd_id = mark->id;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        rule->fd_id = HNS3_MAX_FILTER_ID;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        act_count =
                            (const struct rte_flow_action_count *)actions->conf;
                        counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
                        if (act_count->id >= counter_num)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid counter id");
                        rule->act_cnt = *act_count;
                        rule->flags |= HNS3_RULE_FLAG_COUNTER;
                        break;
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "Unsupported action");
                }
        }

        return 0;
}
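
/*
 * A typical action list accepted by this parser (sketch; the queue index,
 * mark id and counter id are assumptions and must stay below the limits
 * checked above):
 *
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action_mark mark = { .id = 0x234 };
 *   struct rte_flow_action_count count = { .id = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */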

static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
        if (!attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          attr, "Ingress can't be zero");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          attr, "Not support egress");
        if (attr->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          attr, "No support for transfer");
        if (attr->priority)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          attr, "Not support priority");
        if (attr->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          attr, "Not support group");
        return 0;
}

static int
hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error __rte_unused)
{
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                eth_mask = item->mask;
                if (eth_mask->type) {
                        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
                        rule->key_conf.mask.ether_type =
                            rte_be_to_cpu_16(eth_mask->type);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->src)) {
                        hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
                        memcpy(rule->key_conf.mask.src_mac,
                               eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
                        hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
                        memcpy(rule->key_conf.mask.dst_mac,
                               eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
        }

        eth_spec = item->spec;
        rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
        memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;

        rule->key_conf.vlan_num++;
        if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Vlan_num is more than 2");

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                vlan_mask = item->mask;
                if (vlan_mask->tci) {
                        if (rule->key_conf.vlan_num == 1) {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
                                             1);
                                rule->key_conf.mask.vlan_tag1 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        } else {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
                                             1);
                                rule->key_conf.mask.vlan_tag2 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        }
                }
        }

        vlan_spec = item->spec;
        if (rule->key_conf.vlan_num == 1)
                rule->key_conf.spec.vlan_tag1 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        else
                rule->key_conf.spec.vlan_tag2 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        return 0;
}

static bool
hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
{
        if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum)
                return false;

        return true;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv4_mask = item->mask;
                if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip, tos, proto in IPV4");
                }

                if (ipv4_mask->hdr.src_addr) {
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                        rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
                }

                if (ipv4_mask->hdr.dst_addr) {
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                        rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
                }

                if (ipv4_mask->hdr.type_of_service) {
                        hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
                        rule->key_conf.mask.ip_tos =
                            ipv4_mask->hdr.type_of_service;
                }

                if (ipv4_mask->hdr.next_proto_id) {
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
                        rule->key_conf.mask.ip_proto =
                            ipv4_mask->hdr.next_proto_id;
                }
        }

        ipv4_spec = item->spec;
        rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
        rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
        rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
        rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
        return 0;
}

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv6_mask = item->mask;
                if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.hop_limits) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip, proto in IPV6");
                }
                net_addr_to_host(rule->key_conf.mask.src_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
                                 IP_ADDR_LEN);
                net_addr_to_host(rule->key_conf.mask.dst_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
                                 IP_ADDR_LEN);
                rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
                if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                if (ipv6_mask->hdr.proto)
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        }

        ipv6_spec = item->spec;
        net_addr_to_host(rule->key_conf.spec.src_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.src_addr,
                         IP_ADDR_LEN);
        net_addr_to_host(rule->key_conf.spec.dst_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
                         IP_ADDR_LEN);
        rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

        return 0;
}

static bool
hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
{
        if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp)
                return false;

        return true;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_TCP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                tcp_mask = item->mask;
                if (!hns3_check_tcp_mask_supported(tcp_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in TCP");
                }

                if (tcp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.src_port);
                }
                if (tcp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
                }
        }

        tcp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_UDP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                udp_mask = item->mask;
                if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in UDP");
                }
                if (udp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(udp_mask->hdr.src_port);
                }
                if (udp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(udp_mask->hdr.dst_port);
                }
        }

        udp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                sctp_mask = item->mask;
                if (sctp_mask->hdr.cksum)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in SCTP");
                if (sctp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.src_port);
                }
                if (sctp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
                }
                if (sctp_mask->hdr.tag) {
                        hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
                        rule->key_conf.mask.sctp_tag =
                            rte_be_to_cpu_32(sctp_mask->hdr.tag);
                }
        }

        sctp_spec = item->spec;
        rule->key_conf.spec.src_port =
            rte_be_to_cpu_16(sctp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port =
            rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
        rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

        return 0;
}

/*
 * Check items before the tunnel item, save inner configs to outer configs,
 * and clear the inner configs.
 * The key consists of two parts: meta data and tuple keys.
 * Meta data uses 15 bits, including vlan_num (2 bits), dst_port (12 bits)
 * and tunnel packet (1 bit).
 * Tuple keys use 384 bits, including ot_dst-mac (48 bits), ot_dst-port
 * (16 bits), ot_tun_vni (24 bits), ot_flow_id (8 bits), src-mac (48 bits),
 * dst-mac (48 bits), src-ip (32/128 bits), dst-ip (32/128 bits), src-port
 * (16 bits), dst-port (16 bits), tos (8 bits), ether-proto (16 bits),
 * ip-proto (8 bits), vlan-tag1 (16 bits), vlan-tag2 (16 bits) and sctp-tag
 * (32 bits).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
                   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        /* check eth config */
        if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer eth mac is unsupported");
        if (rule->input_set & BIT(INNER_ETH_TYPE)) {
                hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
                rule->key_conf.spec.outer_ether_type =
                    rule->key_conf.spec.ether_type;
                rule->key_conf.mask.outer_ether_type =
                    rule->key_conf.mask.ether_type;
                hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
                rule->key_conf.spec.ether_type = 0;
                rule->key_conf.mask.ether_type = 0;
        }

        /* check vlan config */
        if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Outer vlan tags are unsupported");

        /* clear vlan_num for inner vlan select */
        rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
        rule->key_conf.vlan_num = 0;

        /* check L3 config */
        if (rule->input_set &
            (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer ip is unsupported");
        if (rule->input_set & BIT(INNER_IP_PROTO)) {
                hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
                rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
                rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
                hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
                rule->key_conf.spec.ip_proto = 0;
                rule->key_conf.mask.ip_proto = 0;
        }

        /* check L4 config */
        if (rule->input_set & BIT(INNER_SCTP_TAG))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Outer sctp tag is unsupported");

        if (rule->input_set & BIT(INNER_SRC_PORT)) {
                hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
                rule->key_conf.spec.outer_src_port =
                    rule->key_conf.spec.src_port;
                rule->key_conf.mask.outer_src_port =
                    rule->key_conf.mask.src_port;
                hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
                rule->key_conf.spec.src_port = 0;
                rule->key_conf.mask.src_port = 0;
        }
        if (rule->input_set & BIT(INNER_DST_PORT)) {
                hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
                rule->key_conf.spec.dst_port = 0;
                rule->key_conf.mask.dst_port = 0;
        }
        return 0;
}
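
/*
 * Example of the remapping above (illustrative): for the pattern
 * "eth / ipv4 / udp / vxlan vni is 100 / eth", the ether_type and ip_proto
 * collected from the items before the VXLAN item are moved to
 * outer_ether_type/outer_proto here, leaving the inner fields free for the
 * post-tunnel ETH/VLAN items that follow.
 */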

static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
        else
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        vxlan_mask = item->mask;
        vxlan_spec = item->spec;

        if (vxlan_mask->flags)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Flags are not supported in VxLAN");

        /* VNI must be totally masked or not. */
        if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in VxLAN");
        if (vxlan_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
                   VNI_OR_TNI_LEN);
        return 0;
}
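
/*
 * Example (testpmd flow syntax, illustrative): match VXLAN VNI 100 and
 * steer to queue 2; note the VNI mask must be all-ones or all-zeros:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp /
 *        vxlan vni is 100 / end actions queue index 2 / end
 */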

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;

        hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
        rule->key_conf.spec.outer_proto = IPPROTO_GRE;
        rule->key_conf.mask.outer_proto = IPPROTO_MASK;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
        rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        nvgre_mask = item->mask;
        nvgre_spec = item->spec;

        if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in NVGRE");

        /* TNI must be totally masked or not. */
        if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "TNI must be totally masked or not in NVGRE");

        if (nvgre_mask->tni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
                   VNI_OR_TNI_LEN);

        if (nvgre_mask->flow_id) {
                hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
                rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
        }
        rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
        return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        geneve_mask = item->mask;
        geneve_spec = item->spec;

        if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in GENEVE");
        /* VNI must be totally masked or not. */
        if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in GENEVE");
        if (geneve_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
                   VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");
        else if (item->spec && (item->mask == NULL))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Tunnel packets must configure "
                                          "with mask");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                ret = hns3_parse_vxlan(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                ret = hns3_parse_nvgre(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_GENEVE:
                ret = hns3_parse_geneve(item, rule, error);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported tunnel type!");
        }
        if (ret)
                return ret;
        return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct items_step_mngr *step_mngr,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                ret = hns3_parse_eth(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                ret = hns3_parse_vlan(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                ret = hns3_parse_ipv4(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                ret = hns3_parse_ipv6(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                ret = hns3_parse_tcp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                ret = hns3_parse_udp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
                ret = hns3_parse_sctp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported normal type!");
        }

        return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
                   struct items_step_mngr step_mngr,
                   struct rte_flow_error *error)
{
        int i;

        if (item->last)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
                                          "Not supported last point for range");

        for (i = 0; i < step_mngr.count; i++) {
                if (item->type == step_mngr.items[i])
                        break;
        }

        if (i == step_mngr.count) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Invalid or missing item");
        }
        return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
        if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
            type == RTE_FLOW_ITEM_TYPE_VXLAN ||
            type == RTE_FLOW_ITEM_TYPE_NVGRE ||
            type == RTE_FLOW_ITEM_TYPE_GENEVE)
                return true;
        return false;
}

/*
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule, and
 * get the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 * UDP/TCP/SCTP src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM         Spec                    Mask
 * ETH          dst_addr
 *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
 *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
 * MAC VLAN     tci     0x2016          0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct hns3_fdir_rule *rule,
                       struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_item *item;
        struct items_step_mngr step_mngr;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir not supported in VF");

        step_mngr.items = first_items;
        step_mngr.count = RTE_DIM(first_items);
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                ret = hns3_validate_item(item, step_mngr, error);
                if (ret)
                        return ret;

                if (is_tunnel_packet(item->type)) {
                        ret = hns3_parse_tunnel(item, rule, error);
                        if (ret)
                                return ret;
                        step_mngr.items = tunnel_next_items;
                        step_mngr.count = RTE_DIM(tunnel_next_items);
                } else {
                        ret = hns3_parse_normal(item, rule, &step_mngr, error);
                        if (ret)
                                return ret;
                }
        }

        return hns3_handle_actions(dev, actions, rule, error);
}

void
hns3_flow_init(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_process_private *process_list = dev->process_private;
        pthread_mutexattr_t attr;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                pthread_mutexattr_init(&attr);
                pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
                pthread_mutex_init(&hw->flows_lock, &attr);
                dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
        }

        TAILQ_INIT(&process_list->fdir_list);
        TAILQ_INIT(&process_list->filter_rss_list);
        TAILQ_INIT(&process_list->flow_list);
}

static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
        struct hns3_process_private *process_list = dev->process_private;
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;

        fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
        while (fdir_rule_ptr) {
                TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
                rte_free(fdir_rule_ptr);
                fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
        }

        rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
        while (rss_filter_ptr) {
                TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
                             entries);
                rte_free(rss_filter_ptr);
                rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
        }

        flow_node = TAILQ_FIRST(&process_list->flow_list);
        while (flow_node) {
                TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
                rte_free(flow_node->flow);
                rte_free(flow_node);
                flow_node = TAILQ_FIRST(&process_list->flow_list);
        }
}

static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
                     const struct rte_flow_action_rss *with)
{
        bool func_is_same;

        /*
         * When the user flushes all RSS rules, the RSS func is set to the
         * invalid value RTE_ETH_HASH_FUNCTION_MAX. If the user then creates
         * a flow after the flush, any valid RSS func differs from the
         * flushed one. Otherwise, when the user creates an RSS action with
         * the func specified as RTE_ETH_HASH_FUNCTION_DEFAULT, the func is
         * considered the same between consecutive RSS flows.
         */
        if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
                func_is_same = false;
        else
                func_is_same = with->func ? (comp->func == with->func) : true;

        return (func_is_same &&
                comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
                comp->level == with->level && comp->key_len == with->key_len &&
                comp->queue_num == with->queue_num &&
                !memcmp(comp->key, with->key, with->key_len) &&
                !memcmp(comp->queue, with->queue,
                        sizeof(*with->queue) * with->queue_num));
}
1296
1297 static int
1298 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1299                    const struct rte_flow_action_rss *in)
1300 {
1301         if (in->key_len > RTE_DIM(out->key) ||
1302             in->queue_num > RTE_DIM(out->queue))
1303                 return -EINVAL;
1304         if (in->key == NULL && in->key_len)
1305                 return -EINVAL;
1306         out->conf = (struct rte_flow_action_rss) {
1307                 .func = in->func,
1308                 .level = in->level,
1309                 .types = in->types,
1310                 .key_len = in->key_len,
1311                 .queue_num = in->queue_num,
1312         };
1313         out->conf.queue = memcpy(out->queue, in->queue,
1314                                 sizeof(*in->queue) * in->queue_num);
1315         if (in->key)
1316                 out->conf.key = memcpy(out->key, in->key, in->key_len);
1317
1318         return 0;
1319 }
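
/*
 * Illustrative sketch, not part of the driver: hns3_rss_conf_copy() deep
 * copies the key/queue arrays because the caller's arrays may live on its
 * stack. Hypothetical usage, assuming a stored struct hns3_rss_conf that
 * outlives the caller:
 *
 *      static void
 *      example_store(struct hns3_rss_conf *stored)
 *      {
 *              uint16_t queues[2] = { 0, 1 };  // stack storage
 *              struct rte_flow_action_rss in = {
 *                      .types = ETH_RSS_IP,
 *                      .queue_num = RTE_DIM(queues),
 *                      .queue = queues,
 *              };
 *
 *              (void)hns3_rss_conf_copy(stored, &in);
 *              // stored->conf.queue now points at stored->queue rather than
 *              // at the stack array, so it remains valid after this
 *              // function returns.
 *      }
 */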
1320
1321 static bool
1322 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1323                                const struct rte_flow_action_rss *rss)
1324 {
        /*
         * For IP packets, using the src/dst port fields in the RSS hash is
         * not supported for the following packet types:
         * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
         * In addition, the Kunpeng920 NIC hardware cannot use the src/dst
         * port fields in the RSS hash for the IPv6 SCTP packet type, whereas
         * Kunpeng930 and later Kunpeng series can.
         */
1334         if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
1335             (rss->types & ETH_RSS_IP ||
1336             (!hw->rss_info.ipv6_sctp_offload_supported &&
1337             rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
1338                 return false;
1339
1340         return true;
1341 }
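
/*
 * Examples of the rule above with hypothetical requested types, on hardware
 * where ipv6_sctp_offload_supported is false (e.g. Kunpeng920):
 *
 *      // Rejected: L4 port selection combined with an IP packet type.
 *      uint64_t t1 = ETH_RSS_IPV4 | ETH_RSS_L4_SRC_ONLY;
 *
 *      // Rejected when ipv6_sctp_offload_supported is false.
 *      uint64_t t2 = ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY;
 *
 *      // Accepted: L4 port selection for a TCP packet type.
 *      uint64_t t3 = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY;
 */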
1342
/*
 * This function is used to validate the RSS action.
 */
1346 static int
1347 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1348                       const struct rte_flow_action *actions,
1349                       struct rte_flow_error *error)
1350 {
1351         struct hns3_adapter *hns = dev->data->dev_private;
1352         struct hns3_hw *hw = &hns->hw;
1353         struct hns3_rss_conf *rss_conf = &hw->rss_info;
1354         const struct rte_flow_action_rss *rss;
1355         const struct rte_flow_action *act;
1356         uint32_t act_index = 0;
1357         uint16_t n;
1358
1359         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1360         rss = act->conf;
1361
        if (rss == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          act, "no RSS configuration provided");
        }
1367
        if (rss->queue_num > RTE_DIM(rss_conf->queue))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                                          "the number of queues configured exceeds "
                                          "the queue buffer size the driver supports");
1373
1374         for (n = 0; n < rss->queue_num; n++) {
1375                 if (rss->queue[n] < hw->alloc_rss_size)
1376                         continue;
1377                 return rte_flow_error_set(error, EINVAL,
1378                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1379                                           "queue id must be less than queue number allocated to a TC");
1380         }
1381
        if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          act,
                                          "flow types are not supported by "
                                          "hns3's RSS");
        if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                                          "RSS hash function is not supported");
1392         if (rss->level)
1393                 return rte_flow_error_set(error, ENOTSUP,
1394                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1395                                           "a nonzero RSS encapsulation level is not supported");
1396         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1397                 return rte_flow_error_set(error, ENOTSUP,
1398                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1399                                           "RSS hash key must be exactly 40 bytes");
1400
1401         if (!hns3_rss_input_tuple_supported(hw, rss))
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1404                                           &rss->types,
1405                                           "input RSS types are not supported");
1406
1407         act_index++;
1408
        /* Check that the next non-void action is END */
1410         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1411         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1412                 memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1413                 return rte_flow_error_set(error, EINVAL,
1414                                           RTE_FLOW_ERROR_TYPE_ACTION,
1415                                           act, "Not supported action.");
1416         }
1417
1418         return 0;
1419 }
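
/*
 * Application-side sketch of an RSS action that passes the checks above
 * (hypothetical queue ids; the key must be exactly 40 bytes and each queue
 * id must be below the queue number allocated to a TC):
 *
 *      static const uint8_t key[40] = { 0x6D, 0x5A };  // rest zero
 *      uint16_t queues[4] = { 0, 1, 2, 3 };
 *      struct rte_flow_action_rss rss = {
 *              .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *              .level = 0,                     // nonzero is rejected
 *              .types = ETH_RSS_IP | ETH_RSS_TCP,
 *              .key_len = sizeof(key),
 *              .key = key,
 *              .queue_num = RTE_DIM(queues),
 *              .queue = queues,
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */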
1420
1421 static int
1422 hns3_disable_rss(struct hns3_hw *hw)
1423 {
1424         int ret;
1425
        /* Reset the redirection table so all entries point to queue 0 */
1427         ret = hns3_rss_reset_indir_table(hw);
1428         if (ret)
1429                 return ret;
1430
1431         /* Disable RSS */
1432         hw->rss_info.conf.types = 0;
1433         hw->rss_dis_flag = true;
1434
1435         return 0;
1436 }
1437
1438 static void
1439 hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1440 {
1441         if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
                hns3_warn(hw, "the default RSS hash key will be used");
1443                 rss_conf->key = hns3_hash_key;
1444                 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1445         }
1446 }
1447
1448 static int
1449 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1450                          uint8_t *hash_algo)
1451 {
        enum rte_eth_hash_function algo_func = *func;

        switch (algo_func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
                /* Keep *hash_algo as it was */
1456                 algo_func = hw->rss_info.conf.func;
1457                 break;
1458         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1459                 *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1460                 break;
1461         case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1462                 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1463                 break;
1464         case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1465                 *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1466                 break;
1467         default:
1468                 hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1469                          algo_func);
1470                 return -EINVAL;
1471         }
1472         *func = algo_func;
1473
1474         return 0;
1475 }
1476
1477 static int
1478 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1479 {
1480         struct hns3_rss_tuple_cfg *tuple;
1481         int ret;
1482
1483         hns3_parse_rss_key(hw, rss_config);
1484
1485         ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1486                                        &hw->rss_info.hash_algo);
1487         if (ret)
1488                 return ret;
1489
1490         ret = hns3_rss_set_algo_key(hw, rss_config->key);
1491         if (ret)
1492                 return ret;
1493
1494         hw->rss_info.conf.func = rss_config->func;
1495
1496         tuple = &hw->rss_info.rss_tuple_sets;
1497         ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1498         if (ret)
1499                 hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1500
1501         return ret;
1502 }
1503
1504 static int
1505 hns3_update_indir_table(struct rte_eth_dev *dev,
1506                         const struct rte_flow_action_rss *conf, uint16_t num)
1507 {
1508         struct hns3_adapter *hns = dev->data->dev_private;
1509         struct hns3_hw *hw = &hns->hw;
1510         uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1511         uint16_t j;
1512         uint32_t i;
1513
1514         /* Fill in redirection table */
1515         memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1516                sizeof(hw->rss_info.rss_indirection_tbl));
1517         for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1518                 j %= num;
1519                 if (conf->queue[j] >= hw->alloc_rss_size) {
                        hns3_err(hw, "queue id(%u) set in the redirection table "
                                 "exceeds the queue number(%u) allocated to a TC.",
1522                                  conf->queue[j], hw->alloc_rss_size);
1523                         return -EINVAL;
1524                 }
1525                 indir_tbl[i] = conf->queue[j];
1526         }
1527
1528         return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1529 }
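
/*
 * The loop above fills the redirection table round-robin, i.e.
 * indir_tbl[i] == conf->queue[i % num]. A worked example with hypothetical
 * values, queues {4, 5, 6} and num = 3:
 *
 *      indir_tbl[] = { 4, 5, 6, 4, 5, 6, ... }  // hw->rss_ind_tbl_size entries
 */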
1530
1531 static int
1532 hns3_config_rss_filter(struct rte_eth_dev *dev,
1533                        const struct hns3_rss_conf *conf, bool add)
1534 {
1535         struct hns3_process_private *process_list = dev->process_private;
1536         struct hns3_adapter *hns = dev->data->dev_private;
1537         struct hns3_rss_conf_ele *rss_filter_ptr;
1538         struct hns3_hw *hw = &hns->hw;
1539         struct hns3_rss_conf *rss_info;
1540         uint64_t flow_types;
1541         uint16_t num;
1542         int ret;
1543
1544         struct rte_flow_action_rss rss_flow_conf = {
1545                 .func = conf->conf.func,
1546                 .level = conf->conf.level,
1547                 .types = conf->conf.types,
1548                 .key_len = conf->conf.key_len,
1549                 .queue_num = conf->conf.queue_num,
1550                 .key = conf->conf.key_len ?
1551                     (void *)(uintptr_t)conf->conf.key : NULL,
1552                 .queue = conf->conf.queue,
1553         };
1554
1555         /* Filter the unsupported flow types */
1556         flow_types = conf->conf.types ?
1557                      rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1558                      hw->rss_info.conf.types;
1559         if (flow_types != rss_flow_conf.types)
1560                 hns3_warn(hw, "modified RSS types based on hardware support, "
1561                               "requested:0x%" PRIx64 " configured:0x%" PRIx64,
1562                           rss_flow_conf.types, flow_types);
1563         /* Update the useful flow types */
1564         rss_flow_conf.types = flow_types;
1565
1566         rss_info = &hw->rss_info;
1567         if (!add) {
1568                 if (!conf->valid)
1569                         return 0;
1570
1571                 ret = hns3_disable_rss(hw);
1572                 if (ret) {
1573                         hns3_err(hw, "RSS disable failed(%d)", ret);
1574                         return ret;
1575                 }
1576
1577                 if (rss_flow_conf.queue_num) {
                        /*
                         * Since the contents of the queue pointer have been
                         * reset to 0, rss_info->conf.queue should be set to
                         * NULL.
                         */
1582                         rss_info->conf.queue = NULL;
1583                         rss_info->conf.queue_num = 0;
1584                 }
1585
                /* mark the RSS func invalid after the flush */
1587                 rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1588                 return 0;
1589         }
1590
        /* Set the Rx queues to use */
        num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
        if (rss_flow_conf.queue_num > num)
                hns3_warn(hw, "requested queue number %u exceeds the supported maximum and is truncated",
                          rss_flow_conf.queue_num);
        hns3_info(hw, "a maximum of %u contiguous PF queues are configured", num);
1597
1598         rte_spinlock_lock(&hw->lock);
1599         if (num) {
1600                 ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1601                 if (ret)
1602                         goto rss_config_err;
1603         }
1604
1605         /* Set hash algorithm and flow types by the user's config */
1606         ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1607         if (ret)
1608                 goto rss_config_err;
1609
1610         ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1611         if (ret) {
                hns3_err(hw, "RSS config init failed(%d)", ret);
1613                 goto rss_config_err;
1614         }
1615
        /*
         * When a new RSS rule is created, the old rule is overridden and
         * marked invalid.
         */
1620         TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
1621                 rss_filter_ptr->filter_info.valid = false;
1622
1623 rss_config_err:
1624         rte_spinlock_unlock(&hw->lock);
1625
1626         return ret;
1627 }
1628
1629 static int
1630 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1631 {
1632         struct hns3_process_private *process_list = dev->process_private;
1633         struct hns3_adapter *hns = dev->data->dev_private;
1634         struct hns3_rss_conf_ele *rss_filter_ptr;
1635         struct hns3_hw *hw = &hns->hw;
1636         int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1637         int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1638         int ret = 0;
1639
1640         rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1641         while (rss_filter_ptr) {
1642                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1643                              entries);
1644                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1645                                              false);
1646                 if (ret)
1647                         rss_rule_fail_cnt++;
1648                 else
1649                         rss_rule_succ_cnt++;
1650                 rte_free(rss_filter_ptr);
1651                 rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1652         }
1653
1654         if (rss_rule_fail_cnt) {
                hns3_err(hw, "failed to delete all RSS filters, success num = %d "
1656                              "fail num = %d", rss_rule_succ_cnt,
1657                              rss_rule_fail_cnt);
1658                 ret = -EIO;
1659         }
1660
1661         return ret;
1662 }
1663
1664 int
1665 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1666 {
1667         struct hns3_adapter *hns = dev->data->dev_private;
1668         struct hns3_hw *hw = &hns->hw;
1669
        /* When the user flushes all rules, the RSS rule need not be restored */
1671         if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1672                 return 0;
1673
1674         return hns3_config_rss_filter(dev, &hw->rss_info, true);
1675 }
1676
1677 static int
1678 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1679                     const struct hns3_rss_conf *conf, bool add)
1680 {
1681         struct hns3_adapter *hns = dev->data->dev_private;
1682         struct hns3_hw *hw = &hns->hw;
        bool same;

        same = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
        if (same) {
                hns3_err(hw, "duplicate RSS configuration entered");
                return -EINVAL;
        }
1690
1691         return hns3_config_rss_filter(dev, conf, add);
1692 }
1693
1694 static int
1695 hns3_flow_args_check(const struct rte_flow_attr *attr,
1696                      const struct rte_flow_item pattern[],
1697                      const struct rte_flow_action actions[],
1698                      struct rte_flow_error *error)
1699 {
1700         if (pattern == NULL)
1701                 return rte_flow_error_set(error, EINVAL,
1702                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1703                                           NULL, "NULL pattern.");
1704
1705         if (actions == NULL)
1706                 return rte_flow_error_set(error, EINVAL,
1707                                           RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1708                                           NULL, "NULL action.");
1709
1710         if (attr == NULL)
1711                 return rte_flow_error_set(error, EINVAL,
1712                                           RTE_FLOW_ERROR_TYPE_ATTR,
1713                                           NULL, "NULL attribute.");
1714
1715         return hns3_check_attr(attr, error);
1716 }
1717
/*
 * Check whether the flow rule is supported by hns3.
 * Only the format is checked; there is no guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
1723 static int
1724 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1725                    const struct rte_flow_item pattern[],
1726                    const struct rte_flow_action actions[],
1727                    struct rte_flow_error *error)
1728 {
1729         struct hns3_fdir_rule fdir_rule;
1730         int ret;
1731
1732         ret = hns3_flow_args_check(attr, pattern, actions, error);
1733         if (ret)
1734                 return ret;
1735
1736         if (hns3_find_rss_general_action(pattern, actions))
1737                 return hns3_parse_rss_filter(dev, actions, error);
1738
1739         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1740         return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1741 }
1742
/*
 * Create a flow rule.
 * Theoretically one rule can match more than one filter.
 * We let it use the first filter it hits, so the sequence matters.
 */
1749 static struct rte_flow *
1750 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1751                  const struct rte_flow_item pattern[],
1752                  const struct rte_flow_action actions[],
1753                  struct rte_flow_error *error)
1754 {
1755         struct hns3_process_private *process_list = dev->process_private;
1756         struct hns3_adapter *hns = dev->data->dev_private;
1757         struct hns3_hw *hw = &hns->hw;
1758         const struct hns3_rss_conf *rss_conf;
1759         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1760         struct hns3_rss_conf_ele *rss_filter_ptr;
1761         struct hns3_flow_mem *flow_node;
1762         const struct rte_flow_action *act;
1763         struct rte_flow *flow;
1764         struct hns3_fdir_rule fdir_rule;
1765         int ret;
1766
1767         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1768         if (ret)
1769                 return NULL;
1770
1771         flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1772         if (flow == NULL) {
1773                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1774                                    NULL, "Failed to allocate flow memory");
1775                 return NULL;
1776         }
1777         flow_node = rte_zmalloc("hns3 flow node",
1778                                 sizeof(struct hns3_flow_mem), 0);
1779         if (flow_node == NULL) {
1780                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1781                                    NULL, "Failed to allocate flow list memory");
1782                 rte_free(flow);
1783                 return NULL;
1784         }
1785
1786         flow_node->flow = flow;
1787         TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
1788
1789         act = hns3_find_rss_general_action(pattern, actions);
1790         if (act) {
1791                 rss_conf = act->conf;
1792
1793                 ret = hns3_flow_parse_rss(dev, rss_conf, true);
1794                 if (ret)
1795                         goto err;
1796
1797                 rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1798                                              sizeof(struct hns3_rss_conf_ele),
1799                                              0);
1800                 if (rss_filter_ptr == NULL) {
                        hns3_err(hw,
                                 "Failed to allocate hns3_rss_filter memory");
1803                         ret = -ENOMEM;
1804                         goto err;
1805                 }
1806                 hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1807                                    &rss_conf->conf);
1808                 rss_filter_ptr->filter_info.valid = true;
1809                 TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
1810                                   rss_filter_ptr, entries);
1811
1812                 flow->rule = rss_filter_ptr;
1813                 flow->filter_type = RTE_ETH_FILTER_HASH;
1814                 return flow;
1815         }
1816
1817         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1818         ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1819         if (ret)
1820                 goto out;
1821
1822         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1823                 ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1824                                        fdir_rule.act_cnt.id, error);
1825                 if (ret)
1826                         goto out;
1827
1828                 flow->counter_id = fdir_rule.act_cnt.id;
1829         }
1830
1831         fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1832                                     sizeof(struct hns3_fdir_rule_ele),
1833                                     0);
1834         if (fdir_rule_ptr == NULL) {
1835                 hns3_err(hw, "failed to allocate fdir_rule memory.");
1836                 ret = -ENOMEM;
1837                 goto err_fdir;
1838         }
1839
1840         ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1841         if (!ret) {
1842                 memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1843                         sizeof(struct hns3_fdir_rule));
1844                 TAILQ_INSERT_TAIL(&process_list->fdir_list,
1845                                   fdir_rule_ptr, entries);
1846                 flow->rule = fdir_rule_ptr;
1847                 flow->filter_type = RTE_ETH_FILTER_FDIR;
1848
1849                 return flow;
1850         }
1851
1852         rte_free(fdir_rule_ptr);
1853 err_fdir:
1854         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1855                 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1856 err:
1857         rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1858                            "Failed to create flow");
1859 out:
1860         TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1861         rte_free(flow_node);
1862         rte_free(flow);
1863         return NULL;
1864 }
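
/*
 * Application-side sketch (hypothetical port id and values): creating an
 * FDIR rule through the generic API, which reaches hns3_flow_create() via
 * hns3_flow_ops below:
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_ipv4 spec = {
 *              .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *      };
 *      struct rte_flow_item_ipv4 mask = {
 *              .hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &spec, .mask = &mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *                                              actions, &err);
 */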
1865
1866 /* Destroy a flow rule on hns3. */
1867 static int
1868 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1869                   struct rte_flow_error *error)
1870 {
1871         struct hns3_process_private *process_list = dev->process_private;
1872         struct hns3_adapter *hns = dev->data->dev_private;
1873         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1874         struct hns3_rss_conf_ele *rss_filter_ptr;
1875         struct hns3_flow_mem *flow_node;
1876         enum rte_filter_type filter_type;
1877         struct hns3_fdir_rule fdir_rule;
1878         int ret;
1879
1880         if (flow == NULL)
1881                 return rte_flow_error_set(error, EINVAL,
1882                                           RTE_FLOW_ERROR_TYPE_HANDLE,
1883                                           flow, "Flow is NULL");
1884
1885         filter_type = flow->filter_type;
1886         switch (filter_type) {
1887         case RTE_ETH_FILTER_FDIR:
1888                 fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1889                 memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1890                            sizeof(struct hns3_fdir_rule));
1891
1892                 ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1893                 if (ret)
1894                         return rte_flow_error_set(error, EIO,
1895                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1896                                                   flow,
                                                  "Destroy FDIR failed. Try again");
1898                 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1899                         hns3_counter_release(dev, fdir_rule.act_cnt.id);
1900                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1901                 rte_free(fdir_rule_ptr);
1902                 fdir_rule_ptr = NULL;
1903                 break;
1904         case RTE_ETH_FILTER_HASH:
1905                 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1906                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1907                                              false);
1908                 if (ret)
1909                         return rte_flow_error_set(error, EIO,
1910                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1911                                                   flow,
                                                  "Destroy RSS failed. Try again");
1913                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1914                              entries);
1915                 rte_free(rss_filter_ptr);
1916                 rss_filter_ptr = NULL;
1917                 break;
1918         default:
1919                 return rte_flow_error_set(error, EINVAL,
1920                                           RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1921                                           "Unsupported filter type");
1922         }
1923
1924         TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
1925                 if (flow_node->flow == flow) {
1926                         TAILQ_REMOVE(&process_list->flow_list, flow_node,
1927                                      entries);
1928                         rte_free(flow_node);
1929                         flow_node = NULL;
1930                         break;
1931                 }
1932         }
1933         rte_free(flow);
1934         flow = NULL;
1935
1936         return 0;
1937 }
1938
/* Destroy all flow rules associated with a port on hns3. */
1940 static int
1941 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1942 {
1943         struct hns3_adapter *hns = dev->data->dev_private;
1944         int ret;
1945
1946         /* FDIR is available only in PF driver */
1947         if (!hns->is_vf) {
1948                 ret = hns3_clear_all_fdir_filter(hns);
1949                 if (ret) {
1950                         rte_flow_error_set(error, ret,
1951                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1952                                            NULL, "Failed to flush rule");
1953                         return ret;
1954                 }
1955                 hns3_counter_flush(dev);
1956         }
1957
1958         ret = hns3_clear_rss_filter(dev);
1959         if (ret) {
1960                 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1961                                    NULL, "Failed to flush rss filter");
1962                 return ret;
1963         }
1964
1965         hns3_filterlist_flush(dev);
1966
1967         return 0;
1968 }
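
/*
 * Application-side sketch, assuming a configured port 0: flushing all rules
 * through the generic API, which reaches hns3_flow_flush() via hns3_flow_ops:
 *
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_flush(0, &err) != 0)
 *              printf("flush failed: %s\n",
 *                     err.message != NULL ? err.message : "unknown");
 */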
1969
1970 /* Query an existing flow rule. */
1971 static int
1972 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1973                 const struct rte_flow_action *actions, void *data,
1974                 struct rte_flow_error *error)
1975 {
1976         struct rte_flow_action_rss *rss_conf;
1977         struct hns3_rss_conf_ele *rss_rule;
1978         struct rte_flow_query_count *qc;
1979         int ret;
1980
1981         if (!flow->rule)
1982                 return rte_flow_error_set(error, EINVAL,
1983                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
1984
1985         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1986                 switch (actions->type) {
1987                 case RTE_FLOW_ACTION_TYPE_VOID:
1988                         break;
1989                 case RTE_FLOW_ACTION_TYPE_COUNT:
1990                         qc = (struct rte_flow_query_count *)data;
1991                         ret = hns3_counter_query(dev, flow, qc, error);
1992                         if (ret)
1993                                 return ret;
1994                         break;
1995                 case RTE_FLOW_ACTION_TYPE_RSS:
1996                         if (flow->filter_type != RTE_ETH_FILTER_HASH) {
1997                                 return rte_flow_error_set(error, ENOTSUP,
1998                                         RTE_FLOW_ERROR_TYPE_ACTION,
1999                                         actions, "action is not supported");
2000                         }
2001                         rss_conf = (struct rte_flow_action_rss *)data;
2002                         rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2003                         rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2004                                    sizeof(struct rte_flow_action_rss));
2005                         break;
2006                 default:
2007                         return rte_flow_error_set(error, ENOTSUP,
2008                                 RTE_FLOW_ERROR_TYPE_ACTION,
2009                                 actions, "action is not supported");
2010                 }
2011         }
2012
2013         return 0;
2014 }
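
/*
 * Application-side sketch of querying a COUNT action on an existing flow
 * (hypothetical port 0 and a flow handle obtained from rte_flow_create()):
 *
 *      struct rte_flow_action count_act[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_query_count stats = { .reset = 0 };
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_query(0, flow, count_act, &stats, &err) == 0 &&
 *          stats.hits_set)
 *              printf("hits: %" PRIu64 "\n", stats.hits);
 */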
2015
2016 static int
2017 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2018                         const struct rte_flow_attr *attr,
2019                         const struct rte_flow_item pattern[],
2020                         const struct rte_flow_action actions[],
2021                         struct rte_flow_error *error)
2022 {
2023         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2024         int ret;
2025
2026         pthread_mutex_lock(&hw->flows_lock);
2027         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
2028         pthread_mutex_unlock(&hw->flows_lock);
2029
2030         return ret;
2031 }
2032
2033 static struct rte_flow *
2034 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2035                       const struct rte_flow_item pattern[],
2036                       const struct rte_flow_action actions[],
2037                       struct rte_flow_error *error)
2038 {
2039         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2040         struct rte_flow *flow;
2041
2042         pthread_mutex_lock(&hw->flows_lock);
2043         flow = hns3_flow_create(dev, attr, pattern, actions, error);
2044         pthread_mutex_unlock(&hw->flows_lock);
2045
2046         return flow;
2047 }
2048
2049 static int
2050 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2051                        struct rte_flow_error *error)
2052 {
2053         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2054         int ret;
2055
2056         pthread_mutex_lock(&hw->flows_lock);
2057         ret = hns3_flow_destroy(dev, flow, error);
2058         pthread_mutex_unlock(&hw->flows_lock);
2059
2060         return ret;
2061 }
2062
2063 static int
2064 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2065 {
2066         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2067         int ret;
2068
2069         pthread_mutex_lock(&hw->flows_lock);
2070         ret = hns3_flow_flush(dev, error);
2071         pthread_mutex_unlock(&hw->flows_lock);
2072
2073         return ret;
2074 }
2075
2076 static int
2077 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2078                      const struct rte_flow_action *actions, void *data,
2079                      struct rte_flow_error *error)
2080 {
2081         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2082         int ret;
2083
2084         pthread_mutex_lock(&hw->flows_lock);
2085         ret = hns3_flow_query(dev, flow, actions, data, error);
2086         pthread_mutex_unlock(&hw->flows_lock);
2087
2088         return ret;
2089 }
2090
2091 static const struct rte_flow_ops hns3_flow_ops = {
2092         .validate = hns3_flow_validate_wrap,
2093         .create = hns3_flow_create_wrap,
2094         .destroy = hns3_flow_destroy_wrap,
2095         .flush = hns3_flow_flush_wrap,
2096         .query = hns3_flow_query_wrap,
2097         .isolate = NULL,
2098 };
2099
2100 int
2101 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2102                       const struct rte_flow_ops **ops)
2103 {
2104         struct hns3_hw *hw;
2105
2106         hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2107         if (hw->adapter_state >= HNS3_NIC_CLOSED)
2108                 return -ENODEV;
2109
2110         *ops = &hns3_flow_ops;
2111         return 0;
2112 }