net/hns3: fix copyright date
drivers/net/hns3/hns3_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4
5 #include <rte_flow_driver.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11
12 /* Default hash key */
13 static uint8_t hns3_hash_key[] = {
14         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
15         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
16         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
17         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
18         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
19 };
20
21 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
22 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
23
24 /* Special Filter id for non-specific packet flagging. Don't change value */
25 #define HNS3_MAX_FILTER_ID      0x0FFF
26
27 #define ETHER_TYPE_MASK         0xFFFF
28 #define IPPROTO_MASK            0xFF
29 #define TUNNEL_TYPE_MASK        0xFFFF
30
31 #define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
32 #define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
33 #define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
34 #define HNS3_TUNNEL_TYPE_NVGRE          0x6558
35
36 static enum rte_flow_item_type first_items[] = {
37         RTE_FLOW_ITEM_TYPE_ETH,
38         RTE_FLOW_ITEM_TYPE_IPV4,
39         RTE_FLOW_ITEM_TYPE_IPV6,
40         RTE_FLOW_ITEM_TYPE_TCP,
41         RTE_FLOW_ITEM_TYPE_UDP,
42         RTE_FLOW_ITEM_TYPE_SCTP,
43         RTE_FLOW_ITEM_TYPE_ICMP,
44         RTE_FLOW_ITEM_TYPE_NVGRE,
45         RTE_FLOW_ITEM_TYPE_VXLAN,
46         RTE_FLOW_ITEM_TYPE_GENEVE,
47         RTE_FLOW_ITEM_TYPE_VXLAN_GPE
48 };
49
50 static enum rte_flow_item_type L2_next_items[] = {
51         RTE_FLOW_ITEM_TYPE_VLAN,
52         RTE_FLOW_ITEM_TYPE_IPV4,
53         RTE_FLOW_ITEM_TYPE_IPV6
54 };
55
56 static enum rte_flow_item_type L3_next_items[] = {
57         RTE_FLOW_ITEM_TYPE_TCP,
58         RTE_FLOW_ITEM_TYPE_UDP,
59         RTE_FLOW_ITEM_TYPE_SCTP,
60         RTE_FLOW_ITEM_TYPE_NVGRE,
61         RTE_FLOW_ITEM_TYPE_ICMP
62 };
63
64 static enum rte_flow_item_type L4_next_items[] = {
65         RTE_FLOW_ITEM_TYPE_VXLAN,
66         RTE_FLOW_ITEM_TYPE_GENEVE,
67         RTE_FLOW_ITEM_TYPE_VXLAN_GPE
68 };
69
70 static enum rte_flow_item_type tunnel_next_items[] = {
71         RTE_FLOW_ITEM_TYPE_ETH,
72         RTE_FLOW_ITEM_TYPE_VLAN
73 };
74
75 struct items_step_mngr {
76         enum rte_flow_item_type *items;
77         int count;
78 };
79
80 static inline void
81 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
82 {
83         size_t i;
84
85         for (i = 0; i < len; i++)
86                 dst[i] = rte_be_to_cpu_32(src[i]);
87 }
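
/*
 * Illustrative usage sketch (not part of the driver): net_addr_to_host()
 * converts an array of big-endian 32-bit words to host byte order; "len"
 * counts 32-bit words, not bytes. E.g. for the four words of an IPv6
 * address taken from an item spec (names below are placeholders):
 *
 *     uint32_t host_ip[4];
 *     net_addr_to_host(host_ip,
 *                      (const rte_be32_t *)ipv6_spec->hdr.src_addr, 4);
 */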
88
89 /*
90  * This function is used to find the general RSS action.
91  * 1. RSS is used to spread packets among several queues. The flow API
92  *    provides struct rte_flow_action_rss; the user can configure fields
93  *    such as func/level/types/key/queue to control the RSS function.
94  * 2. The flow API also supports queue region configuration for hns3. It
95  *    is implemented by FDIR + RSS in hns3 hardware: the user can create
96  *    one FDIR rule whose action is an RSS queue region.
97  * 3. When the action is RSS, the following rule distinguishes the cases:
98  *    Case 1: the pattern has ETH and the action's queue_num > 0, which
99  *            indicates a queue region configuration.
100  *    Otherwise: a general RSS action.
101  */
102 static const struct rte_flow_action *
103 hns3_find_rss_general_action(const struct rte_flow_item pattern[],
104                              const struct rte_flow_action actions[])
105 {
106         const struct rte_flow_action *act = NULL;
107         const struct hns3_rss_conf *rss;
108         bool have_eth = false;
109
110         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
111                 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
112                         act = actions;
113                         break;
114                 }
115         }
116         if (!act)
117                 return NULL;
118
119         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
120                 if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
121                         have_eth = true;
122                         break;
123                 }
124         }
125
126         rss = act->conf;
127         if (have_eth && rss->conf.queue_num) {
128                 /*
129                  * The pattern has ETH and the action's queue_num > 0, which
130                  * indicates a queue region configuration.
131                  * Because queue region is implemented by FDIR + RSS in hns3
132                  * hardware, it must go through the FDIR process, so return
133                  * NULL here to avoid entering the RSS process.
134                  */
135                 return NULL;
136         }
137
138         return act;
139 }
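
/*
 * Illustrative testpmd-style rules for the two cases above (assumed
 * syntax, for explanation only):
 *
 *   Queue region (pattern has ETH and queue_num > 0, handled by FDIR):
 *     flow create 0 ingress pattern eth / ipv4 / end
 *          actions rss queues 0 1 2 3 end / end
 *
 *   General RSS action (no queue list, spread by hash types):
 *     flow create 0 ingress pattern ipv4 / end
 *          actions rss types ipv4 end / end
 */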
140
141 static inline struct hns3_flow_counter *
142 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
143 {
144         struct hns3_adapter *hns = dev->data->dev_private;
145         struct hns3_pf *pf = &hns->pf;
146         struct hns3_flow_counter *cnt;
147
148         LIST_FOREACH(cnt, &pf->flow_counters, next) {
149                 if (cnt->id == id)
150                         return cnt;
151         }
152         return NULL;
153 }
154
155 static int
156 hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
157                  struct rte_flow_error *error)
158 {
159         struct hns3_adapter *hns = dev->data->dev_private;
160         struct hns3_pf *pf = &hns->pf;
161         struct hns3_flow_counter *cnt;
162
163         cnt = hns3_counter_lookup(dev, id);
164         if (cnt) {
165                 if (!cnt->shared || cnt->shared != shared)
166                         return rte_flow_error_set(error, ENOTSUP,
167                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
168                                 cnt,
169                                 "Counter id is in use, shared flag does not match");
170                 cnt->ref_cnt++;
171                 return 0;
172         }
173
174         cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
175         if (cnt == NULL)
176                 return rte_flow_error_set(error, ENOMEM,
177                                           RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
178                                           "Alloc mem for counter failed");
179         cnt->id = id;
180         cnt->shared = shared;
181         cnt->ref_cnt = 1;
182         cnt->hits = 0;
183         LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
184         return 0;
185 }
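
/*
 * Sketch of the sharing semantics above (assumed values, not driver
 * code): two rules may reference the same counter ID only if both
 * request it as shared; a repeated hns3_counter_new() then just bumps
 * ref_cnt, while a mismatched shared flag is rejected.
 *
 *     hns3_counter_new(dev, 1, 5, &err);  // creates counter 5, ref_cnt 1
 *     hns3_counter_new(dev, 1, 5, &err);  // reuses it, ref_cnt 2
 *     hns3_counter_new(dev, 0, 5, &err);  // -ENOTSUP, shared flag differs
 */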
186
187 static int
188 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
189                    struct rte_flow_query_count *qc,
190                    struct rte_flow_error *error)
191 {
192         struct hns3_adapter *hns = dev->data->dev_private;
193         struct hns3_flow_counter *cnt;
194         uint64_t value;
195         int ret;
196
197         /* FDIR is available only in PF driver */
198         if (hns->is_vf)
199                 return rte_flow_error_set(error, ENOTSUP,
200                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
201                                           "Fdir is not supported in VF");
202         cnt = hns3_counter_lookup(dev, flow->counter_id);
203         if (cnt == NULL)
204                 return rte_flow_error_set(error, EINVAL,
205                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
206                                           "Can't find counter id");
207
208         ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
209         if (ret) {
210                 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
211                                    NULL, "Read counter failed.");
212                 return ret;
213         }
214         qc->hits_set = 1;
215         qc->hits = value;
216
217         return 0;
218 }
219
220 static int
221 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
222 {
223         struct hns3_adapter *hns = dev->data->dev_private;
224         struct hns3_hw *hw = &hns->hw;
225         struct hns3_flow_counter *cnt;
226
227         cnt = hns3_counter_lookup(dev, id);
228         if (cnt == NULL) {
229                 hns3_err(hw, "Can't find available counter to release");
230                 return -EINVAL;
231         }
232         cnt->ref_cnt--;
233         if (cnt->ref_cnt == 0) {
234                 LIST_REMOVE(cnt, next);
235                 rte_free(cnt);
236         }
237         return 0;
238 }
239
240 static void
241 hns3_counter_flush(struct rte_eth_dev *dev)
242 {
243         struct hns3_adapter *hns = dev->data->dev_private;
244         struct hns3_pf *pf = &hns->pf;
245         struct hns3_flow_counter *cnt_ptr;
246
247         cnt_ptr = LIST_FIRST(&pf->flow_counters);
248         while (cnt_ptr) {
249                 LIST_REMOVE(cnt_ptr, next);
250                 rte_free(cnt_ptr);
251                 cnt_ptr = LIST_FIRST(&pf->flow_counters);
252         }
253 }
254
255 static int
256 hns3_handle_action_queue(struct rte_eth_dev *dev,
257                          const struct rte_flow_action *action,
258                          struct hns3_fdir_rule *rule,
259                          struct rte_flow_error *error)
260 {
261         struct hns3_adapter *hns = dev->data->dev_private;
262         const struct rte_flow_action_queue *queue;
263         struct hns3_hw *hw = &hns->hw;
264
265         queue = (const struct rte_flow_action_queue *)action->conf;
266         if (queue->index >= hw->used_rx_queues) {
267                 hns3_err(hw, "queue ID(%u) is greater than the number of "
268                           "available queues (%u) in driver.",
269                           queue->index, hw->used_rx_queues);
270                 return rte_flow_error_set(error, EINVAL,
271                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
272                                           action, "Invalid queue ID in PF");
273         }
274
275         rule->queue_id = queue->index;
276         rule->nb_queues = 1;
277         rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
278         return 0;
279 }
280
281 static int
282 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
283                                 const struct rte_flow_action *action,
284                                 struct hns3_fdir_rule *rule,
285                                 struct rte_flow_error *error)
286 {
287         struct hns3_adapter *hns = dev->data->dev_private;
288         const struct rte_flow_action_rss *conf = action->conf;
289         struct hns3_hw *hw = &hns->hw;
290         uint16_t idx;
291
292         if (!hns3_dev_fd_queue_region_supported(hw))
293                 return rte_flow_error_set(error, ENOTSUP,
294                         RTE_FLOW_ERROR_TYPE_ACTION, action,
295                         "Not support config queue region!");
296
297         if ((!rte_is_power_of_2(conf->queue_num)) ||
298                 conf->queue_num > hw->rss_size_max ||
299                 conf->queue[0] >= hw->used_rx_queues ||
300                 conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
301                 return rte_flow_error_set(error, EINVAL,
302                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
303                         "Invalid start queue ID and queue num! The start queue "
304                         "ID must be valid, and the queue num must be a power of "
305                         "2 and <= rss_size_max.");
306         }
307
308         for (idx = 1; idx < conf->queue_num; idx++) {
309                 if (conf->queue[idx] != conf->queue[idx - 1] + 1)
310                         return rte_flow_error_set(error, EINVAL,
311                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
312                                 "Invalid queue ID sequence! The queue IDs "
313                                 "must be consecutive and increasing.");
314         }
315
316         rule->queue_id = conf->queue[0];
317         rule->nb_queues = conf->queue_num;
318         rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
319         return 0;
320 }
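
/*
 * Illustrative queue lists for the checks above (sketch, not driver
 * code): the region must cover a power-of-2 number of queues no larger
 * than rss_size_max, with consecutive, in-range queue IDs.
 *
 *     queue = { 8, 9, 10, 11 },  queue_num = 4;  // valid region
 *     queue = { 8, 10, 11, 12 }, queue_num = 4;  // rejected, IDs not
 *                                                // consecutive
 *     queue = { 8, 9, 10 },      queue_num = 3;  // rejected, not a
 *                                                // power of 2
 */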
321
322 /*
323  * Parse the actions structure from the provided list of actions.
324  * The actions are validated as they are copied into the rule.
325  *
326  * @param actions[in]
327  * @param rule[out]
328  *   NIC specific actions derived from the actions.
329  * @param error[out]
330  */
331 static int
332 hns3_handle_actions(struct rte_eth_dev *dev,
333                     const struct rte_flow_action actions[],
334                     struct hns3_fdir_rule *rule, struct rte_flow_error *error)
335 {
336         struct hns3_adapter *hns = dev->data->dev_private;
337         const struct rte_flow_action_count *act_count;
338         const struct rte_flow_action_mark *mark;
339         struct hns3_pf *pf = &hns->pf;
340         uint32_t counter_num;
341         int ret;
342
343         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
344                 switch (actions->type) {
345                 case RTE_FLOW_ACTION_TYPE_QUEUE:
346                         ret = hns3_handle_action_queue(dev, actions, rule,
347                                                        error);
348                         if (ret)
349                                 return ret;
350                         break;
351                 case RTE_FLOW_ACTION_TYPE_DROP:
352                         rule->action = HNS3_FD_ACTION_DROP_PACKET;
353                         break;
354                 /*
355                  * Here the real action of RSS is a queue region.
356                  * Queue region is implemented by FDIR + RSS in hns3 hardware:
357                  * the FDIR action is one queue region (start_queue_id and
358                  * queue_num), then RSS spreads packets over the queue region
359                  * by the RSS algorithm.
360                  */
361                 case RTE_FLOW_ACTION_TYPE_RSS:
362                         ret = hns3_handle_action_queue_region(dev, actions,
363                                                               rule, error);
364                         if (ret)
365                                 return ret;
366                         break;
367                 case RTE_FLOW_ACTION_TYPE_MARK:
368                         mark =
369                             (const struct rte_flow_action_mark *)actions->conf;
370                         if (mark->id >= HNS3_MAX_FILTER_ID)
371                                 return rte_flow_error_set(error, EINVAL,
372                                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
373                                                 actions,
374                                                 "Invalid Mark ID");
375                         rule->fd_id = mark->id;
376                         rule->flags |= HNS3_RULE_FLAG_FDID;
377                         break;
378                 case RTE_FLOW_ACTION_TYPE_FLAG:
379                         rule->fd_id = HNS3_MAX_FILTER_ID;
380                         rule->flags |= HNS3_RULE_FLAG_FDID;
381                         break;
382                 case RTE_FLOW_ACTION_TYPE_COUNT:
383                         act_count =
384                             (const struct rte_flow_action_count *)actions->conf;
385                         counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
386                         if (act_count->id >= counter_num)
387                                 return rte_flow_error_set(error, EINVAL,
388                                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
389                                                 actions,
390                                                 "Invalid counter id");
391                         rule->act_cnt = *act_count;
392                         rule->flags |= HNS3_RULE_FLAG_COUNTER;
393                         break;
394                 case RTE_FLOW_ACTION_TYPE_VOID:
395                         break;
396                 default:
397                         return rte_flow_error_set(error, ENOTSUP,
398                                                   RTE_FLOW_ERROR_TYPE_ACTION,
399                                                   NULL, "Unsupported action");
400                 }
401         }
402
403         return 0;
404 }
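
/*
 * Illustrative action list accepted by the switch above (a sketch with
 * assumed values, not driver code): steer matched packets to queue 3
 * and mark them with ID 0x10.
 *
 *     struct rte_flow_action_queue queue = { .index = 3 };
 *     struct rte_flow_action_mark mark = { .id = 0x10 };
 *     struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *         { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *
 * MARK and FLAG both set HNS3_RULE_FLAG_FDID; FLAG reports the reserved
 * HNS3_MAX_FILTER_ID value instead of a user-supplied mark ID.
 */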
405
406 static int
407 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
408 {
409         if (!attr->ingress)
410                 return rte_flow_error_set(error, EINVAL,
411                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
412                                           attr, "Ingress can't be zero");
413         if (attr->egress)
414                 return rte_flow_error_set(error, ENOTSUP,
415                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
416                                           attr, "Not support egress");
417         if (attr->transfer)
418                 return rte_flow_error_set(error, ENOTSUP,
419                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
420                                           attr, "No support for transfer");
421         if (attr->priority)
422                 return rte_flow_error_set(error, ENOTSUP,
423                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
424                                           attr, "Not support priority");
425         if (attr->group)
426                 return rte_flow_error_set(error, ENOTSUP,
427                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
428                                           attr, "Not support group");
429         return 0;
430 }
431
432 static int
433 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
434                struct rte_flow_error *error __rte_unused)
435 {
436         const struct rte_flow_item_eth *eth_spec;
437         const struct rte_flow_item_eth *eth_mask;
438
439         /* Only used to describe the protocol stack. */
440         if (item->spec == NULL && item->mask == NULL)
441                 return 0;
442
443         if (item->mask) {
444                 eth_mask = item->mask;
445                 if (eth_mask->type) {
446                         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
447                         rule->key_conf.mask.ether_type =
448                             rte_be_to_cpu_16(eth_mask->type);
449                 }
450                 if (!rte_is_zero_ether_addr(&eth_mask->src)) {
451                         hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
452                         memcpy(rule->key_conf.mask.src_mac,
453                                eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
454                 }
455                 if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
456                         hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
457                         memcpy(rule->key_conf.mask.dst_mac,
458                                eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
459                 }
460         }
461
462         eth_spec = item->spec;
463         rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
464         memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
465                RTE_ETHER_ADDR_LEN);
466         memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
467                RTE_ETHER_ADDR_LEN);
468         return 0;
469 }
470
471 static int
472 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
473                 struct rte_flow_error *error)
474 {
475         const struct rte_flow_item_vlan *vlan_spec;
476         const struct rte_flow_item_vlan *vlan_mask;
477
478         rule->key_conf.vlan_num++;
479         if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
480                 return rte_flow_error_set(error, EINVAL,
481                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
482                                           "Vlan_num is more than 2");
483
484         /* Only used to describe the protocol stack. */
485         if (item->spec == NULL && item->mask == NULL)
486                 return 0;
487
488         if (item->mask) {
489                 vlan_mask = item->mask;
490                 if (vlan_mask->tci) {
491                         if (rule->key_conf.vlan_num == 1) {
492                                 hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
493                                              1);
494                                 rule->key_conf.mask.vlan_tag1 =
495                                     rte_be_to_cpu_16(vlan_mask->tci);
496                         } else {
497                                 hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
498                                              1);
499                                 rule->key_conf.mask.vlan_tag2 =
500                                     rte_be_to_cpu_16(vlan_mask->tci);
501                         }
502                 }
503         }
504
505         vlan_spec = item->spec;
506         if (rule->key_conf.vlan_num == 1)
507                 rule->key_conf.spec.vlan_tag1 =
508                     rte_be_to_cpu_16(vlan_spec->tci);
509         else
510                 rule->key_conf.spec.vlan_tag2 =
511                     rte_be_to_cpu_16(vlan_spec->tci);
512         return 0;
513 }
514
515 static bool
516 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
517 {
518         if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
519             ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
520             ipv4_mask->hdr.hdr_checksum)
521                 return false;
522
523         return true;
524 }
525
526 static int
527 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
528                 struct rte_flow_error *error)
529 {
530         const struct rte_flow_item_ipv4 *ipv4_spec;
531         const struct rte_flow_item_ipv4 *ipv4_mask;
532
533         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
534         rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
535         rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
536
537         /* Only used to describe the protocol stack. */
538         if (item->spec == NULL && item->mask == NULL)
539                 return 0;
540
541         if (item->mask) {
542                 ipv4_mask = item->mask;
543                 if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
544                         return rte_flow_error_set(error, EINVAL,
545                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
546                                                   item,
547                                                   "Only support src & dst ip,tos,proto in IPV4");
548                 }
549
550                 if (ipv4_mask->hdr.src_addr) {
551                         hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
552                         rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
553                             rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
554                 }
555
556                 if (ipv4_mask->hdr.dst_addr) {
557                         hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
558                         rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
559                             rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
560                 }
561
562                 if (ipv4_mask->hdr.type_of_service) {
563                         hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
564                         rule->key_conf.mask.ip_tos =
565                             ipv4_mask->hdr.type_of_service;
566                 }
567
568                 if (ipv4_mask->hdr.next_proto_id) {
569                         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
570                         rule->key_conf.mask.ip_proto =
571                             ipv4_mask->hdr.next_proto_id;
572                 }
573         }
574
575         ipv4_spec = item->spec;
576         rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
577             rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
578         rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
579             rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
580         rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
581         rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
582         return 0;
583 }
584
585 static int
586 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
587                 struct rte_flow_error *error)
588 {
589         const struct rte_flow_item_ipv6 *ipv6_spec;
590         const struct rte_flow_item_ipv6 *ipv6_mask;
591
592         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
593         rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
594         rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
595
596         /* Only used to describe the protocol stack. */
597         if (item->spec == NULL && item->mask == NULL)
598                 return 0;
599
600         if (item->mask) {
601                 ipv6_mask = item->mask;
602                 if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
603                     ipv6_mask->hdr.hop_limits) {
604                         return rte_flow_error_set(error, EINVAL,
605                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
606                                                   item,
607                                                   "Only support src & dst ip,proto in IPV6");
608                 }
609                 net_addr_to_host(rule->key_conf.mask.src_ip,
610                                  (const rte_be32_t *)ipv6_mask->hdr.src_addr,
611                                  IP_ADDR_LEN);
612                 net_addr_to_host(rule->key_conf.mask.dst_ip,
613                                  (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
614                                  IP_ADDR_LEN);
615                 rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
616                 if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
617                         hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
618                 if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
619                         hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
620                 if (ipv6_mask->hdr.proto)
621                         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
622         }
623
624         ipv6_spec = item->spec;
625         net_addr_to_host(rule->key_conf.spec.src_ip,
626                          (const rte_be32_t *)ipv6_spec->hdr.src_addr,
627                          IP_ADDR_LEN);
628         net_addr_to_host(rule->key_conf.spec.dst_ip,
629                          (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
630                          IP_ADDR_LEN);
631         rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
632
633         return 0;
634 }
635
636 static bool
637 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
638 {
639         if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
640             tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
641             tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
642             tcp_mask->hdr.tcp_urp)
643                 return false;
644
645         return true;
646 }
647
648 static int
649 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
650                struct rte_flow_error *error)
651 {
652         const struct rte_flow_item_tcp *tcp_spec;
653         const struct rte_flow_item_tcp *tcp_mask;
654
655         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
656         rule->key_conf.spec.ip_proto = IPPROTO_TCP;
657         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
658
659         /* Only used to describe the protocol stack. */
660         if (item->spec == NULL && item->mask == NULL)
661                 return 0;
662
663         if (item->mask) {
664                 tcp_mask = item->mask;
665                 if (!hns3_check_tcp_mask_supported(tcp_mask)) {
666                         return rte_flow_error_set(error, EINVAL,
667                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
668                                                   item,
669                                                   "Only support src & dst port in TCP");
670                 }
671
672                 if (tcp_mask->hdr.src_port) {
673                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
674                         rule->key_conf.mask.src_port =
675                             rte_be_to_cpu_16(tcp_mask->hdr.src_port);
676                 }
677                 if (tcp_mask->hdr.dst_port) {
678                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
679                         rule->key_conf.mask.dst_port =
680                             rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
681                 }
682         }
683
684         tcp_spec = item->spec;
685         rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
686         rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
687
688         return 0;
689 }
690
691 static int
692 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
693                struct rte_flow_error *error)
694 {
695         const struct rte_flow_item_udp *udp_spec;
696         const struct rte_flow_item_udp *udp_mask;
697
698         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
699         rule->key_conf.spec.ip_proto = IPPROTO_UDP;
700         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
701
702         /* Only used to describe the protocol stack. */
703         if (item->spec == NULL && item->mask == NULL)
704                 return 0;
705
706         if (item->mask) {
707                 udp_mask = item->mask;
708                 if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
709                         return rte_flow_error_set(error, EINVAL,
710                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
711                                                   item,
712                                                   "Only support src & dst port in UDP");
713                 }
714                 if (udp_mask->hdr.src_port) {
715                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
716                         rule->key_conf.mask.src_port =
717                             rte_be_to_cpu_16(udp_mask->hdr.src_port);
718                 }
719                 if (udp_mask->hdr.dst_port) {
720                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
721                         rule->key_conf.mask.dst_port =
722                             rte_be_to_cpu_16(udp_mask->hdr.dst_port);
723                 }
724         }
725
726         udp_spec = item->spec;
727         rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
728         rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
729
730         return 0;
731 }
732
733 static int
734 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
735                 struct rte_flow_error *error)
736 {
737         const struct rte_flow_item_sctp *sctp_spec;
738         const struct rte_flow_item_sctp *sctp_mask;
739
740         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
741         rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
742         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
743
744         /* Only used to describe the protocol stack. */
745         if (item->spec == NULL && item->mask == NULL)
746                 return 0;
747
748         if (item->mask) {
749                 sctp_mask = item->mask;
750                 if (sctp_mask->hdr.cksum)
751                         return rte_flow_error_set(error, EINVAL,
752                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
753                                                   item,
754                                                   "Only support src & dst port in SCTP");
755                 if (sctp_mask->hdr.src_port) {
756                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
757                         rule->key_conf.mask.src_port =
758                             rte_be_to_cpu_16(sctp_mask->hdr.src_port);
759                 }
760                 if (sctp_mask->hdr.dst_port) {
761                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
762                         rule->key_conf.mask.dst_port =
763                             rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
764                 }
765                 if (sctp_mask->hdr.tag) {
766                         hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
767                         rule->key_conf.mask.sctp_tag =
768                             rte_be_to_cpu_32(sctp_mask->hdr.tag);
769                 }
770         }
771
772         sctp_spec = item->spec;
773         rule->key_conf.spec.src_port =
774             rte_be_to_cpu_16(sctp_spec->hdr.src_port);
775         rule->key_conf.spec.dst_port =
776             rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
777         rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
778
779         return 0;
780 }
781
782 /*
783  * Check items before tunnel, save inner configs to outer configs, and clear
784  * inner configs.
785  * The key consists of two parts: meta_data and tuple keys.
786  * Meta data uses 15 bits, including vlan_num(2bit), dst_port(12bit) and
787  * tunnel packet(1bit).
788  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
789  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
790  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
791  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
792  * vlantag2(16bit) and sctp-tag(32bit).
793  */
794 static int
795 hns3_handle_tunnel(const struct rte_flow_item *item,
796                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
797 {
798         /* check eth config */
799         if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
800                 return rte_flow_error_set(error, EINVAL,
801                                           RTE_FLOW_ERROR_TYPE_ITEM,
802                                           item, "Outer eth mac is unsupported");
803         if (rule->input_set & BIT(INNER_ETH_TYPE)) {
804                 hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
805                 rule->key_conf.spec.outer_ether_type =
806                     rule->key_conf.spec.ether_type;
807                 rule->key_conf.mask.outer_ether_type =
808                     rule->key_conf.mask.ether_type;
809                 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
810                 rule->key_conf.spec.ether_type = 0;
811                 rule->key_conf.mask.ether_type = 0;
812         }
813
814         /* check vlan config */
815         if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
816                 return rte_flow_error_set(error, EINVAL,
817                                           RTE_FLOW_ERROR_TYPE_ITEM,
818                                           item,
819                                           "Outer vlan tags are unsupported");
820
821         /* clear vlan_num for inner vlan select */
822         rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
823         rule->key_conf.vlan_num = 0;
824
825         /* check L3 config */
826         if (rule->input_set &
827             (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
828                 return rte_flow_error_set(error, EINVAL,
829                                           RTE_FLOW_ERROR_TYPE_ITEM,
830                                           item, "Outer ip is unsupported");
831         if (rule->input_set & BIT(INNER_IP_PROTO)) {
832                 hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
833                 rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
834                 rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
835                 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
836                 rule->key_conf.spec.ip_proto = 0;
837                 rule->key_conf.mask.ip_proto = 0;
838         }
839
840         /* check L4 config */
841         if (rule->input_set & BIT(INNER_SCTP_TAG))
842                 return rte_flow_error_set(error, EINVAL,
843                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
844                                           "Outer sctp tag is unsupported");
845
846         if (rule->input_set & BIT(INNER_SRC_PORT)) {
847                 hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
848                 rule->key_conf.spec.outer_src_port =
849                     rule->key_conf.spec.src_port;
850                 rule->key_conf.mask.outer_src_port =
851                     rule->key_conf.mask.src_port;
852                 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
853                 rule->key_conf.spec.src_port = 0;
854                 rule->key_conf.mask.src_port = 0;
855         }
856         if (rule->input_set & BIT(INNER_DST_PORT)) {
857                 hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
858                 rule->key_conf.spec.dst_port = 0;
859                 rule->key_conf.mask.dst_port = 0;
860         }
861         return 0;
862 }
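
/*
 * Worked size check for the tuple key layout described before
 * hns3_handle_tunnel() (using 32-bit IPv4 addresses):
 *
 *     48 + 16 + 24 + 8          ot_dst-mac, ot_dst-port, ot_tun_vni,
 *                               ot_flow_id
 *   + 48 + 48                   src-mac, dst-mac
 *   + 32 + 32 + 16 + 16 + 8     src-ip, dst-ip, src-port, dst-port, tos
 *   + 16 + 8 + 16 + 16 + 32     ether-proto, ip-proto, vlantag1,
 *                               vlantag2, sctp-tag
 *   = 384 bits
 */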
863
864 static int
865 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
866                  struct rte_flow_error *error)
867 {
868         const struct rte_flow_item_vxlan *vxlan_spec;
869         const struct rte_flow_item_vxlan *vxlan_mask;
870
871         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
872         rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
873         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
874                 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
875         else
876                 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
877
878         /* Only used to describe the protocol stack. */
879         if (item->spec == NULL && item->mask == NULL)
880                 return 0;
881
882         vxlan_mask = item->mask;
883         vxlan_spec = item->spec;
884
885         if (vxlan_mask->flags)
886                 return rte_flow_error_set(error, EINVAL,
887                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
888                                           "Flags is not supported in VxLAN");
889
890         /* VNI must be totally masked or not. */
891         if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
892             memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
893                 return rte_flow_error_set(error, EINVAL,
894                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
895                                           "VNI must be totally masked or not in VxLAN");
896         if (vxlan_mask->vni[0]) {
897                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
898                 memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
899                            VNI_OR_TNI_LEN);
900         }
901         memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
902                    VNI_OR_TNI_LEN);
903         return 0;
904 }
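
/*
 * Illustrative VNI masks for the all-or-nothing check above (sketch,
 * not driver code):
 *
 *     uint8_t full[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF }; // match VNI
 *     uint8_t none[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 }; // ignore VNI
 *     uint8_t part[VNI_OR_TNI_LEN] = { 0xFF, 0xF0, 0x00 }; // rejected
 */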
905
906 static int
907 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
908                  struct rte_flow_error *error)
909 {
910         const struct rte_flow_item_nvgre *nvgre_spec;
911         const struct rte_flow_item_nvgre *nvgre_mask;
912
913         hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
914         rule->key_conf.spec.outer_proto = IPPROTO_GRE;
915         rule->key_conf.mask.outer_proto = IPPROTO_MASK;
916
917         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
918         rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
919         rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
920         /* Only used to describe the protocol stack. */
921         if (item->spec == NULL && item->mask == NULL)
922                 return 0;
923
924         nvgre_mask = item->mask;
925         nvgre_spec = item->spec;
926
927         if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
928                 return rte_flow_error_set(error, EINVAL,
929                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
930                                           "Ver/protocol is not supported in NVGRE");
931
932         /* TNI must be totally masked or not. */
933         if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
934             memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
935                 return rte_flow_error_set(error, EINVAL,
936                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
937                                           "TNI must be totally masked or not in NVGRE");
938
939         if (nvgre_mask->tni[0]) {
940                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
941                 memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
942                            VNI_OR_TNI_LEN);
943         }
944         memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
945                    VNI_OR_TNI_LEN);
946
947         if (nvgre_mask->flow_id) {
948                 hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
949                 rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
950         }
951         rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
952         return 0;
953 }
954
955 static int
956 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
957                   struct rte_flow_error *error)
958 {
959         const struct rte_flow_item_geneve *geneve_spec;
960         const struct rte_flow_item_geneve *geneve_mask;
961
962         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
963         rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
964         rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
965         /* Only used to describe the protocol stack. */
966         if (item->spec == NULL && item->mask == NULL)
967                 return 0;
968
969         geneve_mask = item->mask;
970         geneve_spec = item->spec;
971
972         if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
973                 return rte_flow_error_set(error, EINVAL,
974                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
975                                           "Ver/protocol is not supported in GENEVE");
976         /* VNI must be totally masked or not. */
977         if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
978             memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
979                 return rte_flow_error_set(error, EINVAL,
980                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
981                                           "VNI must be totally masked or not in GENEVE");
982         if (geneve_mask->vni[0]) {
983                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
984                 memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
985                            VNI_OR_TNI_LEN);
986         }
987         memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
988                    VNI_OR_TNI_LEN);
989         return 0;
990 }
991
992 static int
993 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
994                   struct rte_flow_error *error)
995 {
996         int ret;
997
998         if (item->spec == NULL && item->mask)
999                 return rte_flow_error_set(error, EINVAL,
1000                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1001                                           "Can't configure FDIR with mask "
1002                                           "but without spec");
1003         else if (item->spec && (item->mask == NULL))
1004                 return rte_flow_error_set(error, EINVAL,
1005                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1006                                           "Tunnel packets must configure "
1007                                           "with mask");
1008
1009         switch (item->type) {
1010         case RTE_FLOW_ITEM_TYPE_VXLAN:
1011         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1012                 ret = hns3_parse_vxlan(item, rule, error);
1013                 break;
1014         case RTE_FLOW_ITEM_TYPE_NVGRE:
1015                 ret = hns3_parse_nvgre(item, rule, error);
1016                 break;
1017         case RTE_FLOW_ITEM_TYPE_GENEVE:
1018                 ret = hns3_parse_geneve(item, rule, error);
1019                 break;
1020         default:
1021                 return rte_flow_error_set(error, ENOTSUP,
1022                                           RTE_FLOW_ERROR_TYPE_ITEM,
1023                                           NULL, "Unsupported tunnel type!");
1024         }
1025         if (ret)
1026                 return ret;
1027         return hns3_handle_tunnel(item, rule, error);
1028 }
1029
1030 static int
1031 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1032                   struct items_step_mngr *step_mngr,
1033                   struct rte_flow_error *error)
1034 {
1035         int ret;
1036
1037         if (item->spec == NULL && item->mask)
1038                 return rte_flow_error_set(error, EINVAL,
1039                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1040                                           "Can't configure FDIR with mask "
1041                                           "but without spec");
1042
1043         switch (item->type) {
1044         case RTE_FLOW_ITEM_TYPE_ETH:
1045                 ret = hns3_parse_eth(item, rule, error);
1046                 step_mngr->items = L2_next_items;
1047                 step_mngr->count = ARRAY_SIZE(L2_next_items);
1048                 break;
1049         case RTE_FLOW_ITEM_TYPE_VLAN:
1050                 ret = hns3_parse_vlan(item, rule, error);
1051                 step_mngr->items = L2_next_items;
1052                 step_mngr->count = ARRAY_SIZE(L2_next_items);
1053                 break;
1054         case RTE_FLOW_ITEM_TYPE_IPV4:
1055                 ret = hns3_parse_ipv4(item, rule, error);
1056                 step_mngr->items = L3_next_items;
1057                 step_mngr->count = ARRAY_SIZE(L3_next_items);
1058                 break;
1059         case RTE_FLOW_ITEM_TYPE_IPV6:
1060                 ret = hns3_parse_ipv6(item, rule, error);
1061                 step_mngr->items = L3_next_items;
1062                 step_mngr->count = ARRAY_SIZE(L3_next_items);
1063                 break;
1064         case RTE_FLOW_ITEM_TYPE_TCP:
1065                 ret = hns3_parse_tcp(item, rule, error);
1066                 step_mngr->items = L4_next_items;
1067                 step_mngr->count = ARRAY_SIZE(L4_next_items);
1068                 break;
1069         case RTE_FLOW_ITEM_TYPE_UDP:
1070                 ret = hns3_parse_udp(item, rule, error);
1071                 step_mngr->items = L4_next_items;
1072                 step_mngr->count = ARRAY_SIZE(L4_next_items);
1073                 break;
1074         case RTE_FLOW_ITEM_TYPE_SCTP:
1075                 ret = hns3_parse_sctp(item, rule, error);
1076                 step_mngr->items = L4_next_items;
1077                 step_mngr->count = ARRAY_SIZE(L4_next_items);
1078                 break;
1079         default:
1080                 return rte_flow_error_set(error, ENOTSUP,
1081                                           RTE_FLOW_ERROR_TYPE_ITEM,
1082                                           NULL, "Unsupported normal type!");
1083         }
1084
1085         return ret;
1086 }
1087
1088 static int
1089 hns3_validate_item(const struct rte_flow_item *item,
1090                    struct items_step_mngr step_mngr,
1091                    struct rte_flow_error *error)
1092 {
1093         int i;
1094
1095         if (item->last)
1096                 return rte_flow_error_set(error, ENOTSUP,
1097                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1098                                           "Not supported last point for range");
1099
1100         for (i = 0; i < step_mngr.count; i++) {
1101                 if (item->type == step_mngr.items[i])
1102                         break;
1103         }
1104
1105         if (i == step_mngr.count) {
1106                 return rte_flow_error_set(error, EINVAL,
1107                                           RTE_FLOW_ERROR_TYPE_ITEM,
1108                                           item, "Invalid or missing item");
1109         }
1110         return 0;
1111 }
1112
1113 static inline bool
1114 is_tunnel_packet(enum rte_flow_item_type type)
1115 {
1116         if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1117             type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1118             type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1119             type == RTE_FLOW_ITEM_TYPE_GENEVE)
1120                 return true;
1121         return false;
1122 }
1123
1124 /*
1125  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1126  * and get the flow director filter info along the way.
1127  * UDP/TCP/SCTP PATTERN:
1128  * The first not void item can be ETH or IPV4 or IPV6
1129  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1130  * The next not void item could be UDP or TCP or SCTP (optional)
1131  * The next not void item could be RAW (for flexbyte, optional)
1132  * The next not void item must be END.
1133  * A Fuzzy Match pattern can appear at any place before END.
1134  * Fuzzy Match is optional for IPV4 but is required for IPV6
1135  * MAC VLAN PATTERN:
1136  * The first not void item must be ETH.
1137  * The second not void item must be MAC VLAN.
1138  * The next not void item must be END.
1139  * ACTION:
1140  * The first not void action should be QUEUE or DROP.
1141  * The second not void optional action should be MARK,
1142  * mark_id is a uint32_t number.
1143  * The next not void action should be END.
1144  * UDP/TCP/SCTP pattern example:
1145  * ITEM         Spec                    Mask
1146  * ETH          NULL                    NULL
1147  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1148  *              dst_addr 192.167.3.50   0xFFFFFFFF
1149  * UDP/TCP/SCTP src_port        80      0xFFFF
1150  *              dst_port        80      0xFFFF
1151  * END
1152  * MAC VLAN pattern example:
1153  * ITEM         Spec                    Mask
1154  * ETH          dst_addr
1155  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1156  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1157  * MAC VLAN     tci     0x2016          0xEFFF
1158  * END
1159  * Other members in mask and spec should be set to 0x00.
1160  * Item->last should be NULL.
1161  */
1162 static int
1163 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1164                        const struct rte_flow_item pattern[],
1165                        const struct rte_flow_action actions[],
1166                        struct hns3_fdir_rule *rule,
1167                        struct rte_flow_error *error)
1168 {
1169         struct hns3_adapter *hns = dev->data->dev_private;
1170         const struct rte_flow_item *item;
1171         struct items_step_mngr step_mngr;
1172         int ret;
1173
1174         /* FDIR is available only in PF driver */
1175         if (hns->is_vf)
1176                 return rte_flow_error_set(error, ENOTSUP,
1177                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1178                                           "Fdir not supported in VF");
1179
1180         step_mngr.items = first_items;
1181         step_mngr.count = ARRAY_SIZE(first_items);
1182         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1183                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1184                         continue;
1185
1186                 ret = hns3_validate_item(item, step_mngr, error);
1187                 if (ret)
1188                         return ret;
1189
1190                 if (is_tunnel_packet(item->type)) {
1191                         ret = hns3_parse_tunnel(item, rule, error);
1192                         if (ret)
1193                                 return ret;
1194                         step_mngr.items = tunnel_next_items;
1195                         step_mngr.count = ARRAY_SIZE(tunnel_next_items);
1196                 } else {
1197                         ret = hns3_parse_normal(item, rule, &step_mngr, error);
1198                         if (ret)
1199                                 return ret;
1200                 }
1201         }
1202
1203         return hns3_handle_actions(dev, actions, rule, error);
1204 }
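
/*
 * Illustrative pattern walk through the loop above (assumed
 * testpmd-style syntax, for explanation only):
 *
 *     flow create 0 ingress
 *          pattern eth / ipv4 src is 192.168.1.20 / udp dst is 80 / end
 *          actions queue index 1 / end
 *
 * ETH narrows the next step to L2_next_items, IPV4 to L3_next_items and
 * UDP to L4_next_items, so out-of-order items are rejected by
 * hns3_validate_item().
 */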
1205
1206 void
1207 hns3_filterlist_init(struct rte_eth_dev *dev)
1208 {
1209         struct hns3_process_private *process_list = dev->process_private;
1210
1211         TAILQ_INIT(&process_list->fdir_list);
1212         TAILQ_INIT(&process_list->filter_rss_list);
1213         TAILQ_INIT(&process_list->flow_list);
1214 }
1215
1216 static void
1217 hns3_filterlist_flush(struct rte_eth_dev *dev)
1218 {
1219         struct hns3_process_private *process_list = dev->process_private;
1220         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1221         struct hns3_rss_conf_ele *rss_filter_ptr;
1222         struct hns3_flow_mem *flow_node;
1223
1224         fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1225         while (fdir_rule_ptr) {
1226                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1227                 rte_free(fdir_rule_ptr);
1228                 fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1229         }
1230
1231         rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1232         while (rss_filter_ptr) {
1233                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1234                              entries);
1235                 rte_free(rss_filter_ptr);
1236                 rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1237         }
1238
1239         flow_node = TAILQ_FIRST(&process_list->flow_list);
1240         while (flow_node) {
1241                 TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1242                 rte_free(flow_node->flow);
1243                 rte_free(flow_node);
1244                 flow_node = TAILQ_FIRST(&process_list->flow_list);
1245         }
1246 }
1247
1248 static bool
1249 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1250                      const struct rte_flow_action_rss *with)
1251 {
1252         bool func_is_same;
1253
1254         /*
1255          * When the user flushes all RSS rules, the RSS func is invalidated
1256          * by setting it to RTE_ETH_HASH_FUNCTION_MAX, so any valid RSS func
1257          * in a flow created after the flush differs from the stored one.
1258          * Otherwise, when the user creates an RSS action whose func is
1259          * RTE_ETH_HASH_FUNCTION_DEFAULT, the func is treated as matching
1260          * between consecutive RSS flows.
1261          */
1262         if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1263                 func_is_same = false;
1264         else
1265                 func_is_same = with->func ? (comp->func == with->func) : true;
1266
1267         return (func_is_same &&
1268                 comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1269                 comp->level == with->level && comp->key_len == with->key_len &&
1270                 comp->queue_num == with->queue_num &&
1271                 !memcmp(comp->key, with->key, with->key_len) &&
1272                 !memcmp(comp->queue, with->queue,
1273                         sizeof(*with->queue) * with->queue_num));
1274 }
1275
1276 static int
1277 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1278                    const struct rte_flow_action_rss *in)
1279 {
1280         if (in->key_len > RTE_DIM(out->key) ||
1281             in->queue_num > RTE_DIM(out->queue))
1282                 return -EINVAL;
1283         if (in->key == NULL && in->key_len)
1284                 return -EINVAL;
1285         out->conf = (struct rte_flow_action_rss) {
1286                 .func = in->func,
1287                 .level = in->level,
1288                 .types = in->types,
1289                 .key_len = in->key_len,
1290                 .queue_num = in->queue_num,
1291         };
1292         out->conf.queue = memcpy(out->queue, in->queue,
1293                                 sizeof(*in->queue) * in->queue_num);
1294         if (in->key)
1295                 out->conf.key = memcpy(out->key, in->key, in->key_len);
1296
1297         return 0;
1298 }
1299
1300 static bool
1301 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1302                                const struct rte_flow_action_rss *rss)
1303 {
1304         /*
1305          * For IP packets, using the src/dst port fields in the RSS hash is
1306          * not supported for the following packet types:
1307          * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1308          * In addition, on Kunpeng920 the NIC hardware cannot use the
1309          * src/dst port fields in the RSS hash for the IPv6 SCTP packet
1310          * type, whereas Kunpeng930 and later Kunpeng series can use them
1311          * for IPv6 SCTP packets.
1312          */
1313         if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
1314             (rss->types & ETH_RSS_IP ||
1315             (!hw->rss_info.ipv6_sctp_offload_supported &&
1316             rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
1317                 return false;
1318
1319         return true;
1320 }
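
/*
 * For illustration, assuming a Kunpeng920 NIC
 * (hw->rss_info.ipv6_sctp_offload_supported == false):
 *   ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY  -> supported
 *   ETH_RSS_IPV4 | ETH_RSS_L4_SRC_ONLY              -> rejected
 *   ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY -> rejected
 */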
1321
1322 /*
1323  * This function is used to validate the RSS action.
1324  */
1325 static int
1326 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1327                       const struct rte_flow_action *actions,
1328                       struct rte_flow_error *error)
1329 {
1330         struct hns3_adapter *hns = dev->data->dev_private;
1331         struct hns3_hw *hw = &hns->hw;
1332         struct hns3_rss_conf *rss_conf = &hw->rss_info;
1333         const struct rte_flow_action_rss *rss;
1334         const struct rte_flow_action *act;
1335         uint32_t act_index = 0;
1336         uint16_t n;
1337
1338         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1339         rss = act->conf;
1340
1341         if (rss == NULL) {
1342                 return rte_flow_error_set(error, EINVAL,
1343                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1344                                           act, "RSS action conf is NULL");
1345         }
1346
1347         if (rss->queue_num > RTE_DIM(rss_conf->queue))
1348                 return rte_flow_error_set(error, ENOTSUP,
1349                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1350                                           "configured queue number exceeds the "
1351                                           "queue buffer size the driver supports");
1352
1353         for (n = 0; n < rss->queue_num; n++) {
1354                 if (rss->queue[n] < hw->alloc_rss_size)
1355                         continue;
1356                 return rte_flow_error_set(error, EINVAL,
1357                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1358                                           "queue id must be less than queue number allocated to a TC");
1359         }
1360
1361         if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1362                 return rte_flow_error_set(error, EINVAL,
1363                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1364                                           act,
1365                                           "flow types are unsupported by "
1366                                           "hns3's RSS");
1367         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1368                 return rte_flow_error_set(error, ENOTSUP,
1369                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1370                                           "RSS hash func is not supported");
1371         if (rss->level)
1372                 return rte_flow_error_set(error, ENOTSUP,
1373                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1374                                           "a nonzero RSS encapsulation level is not supported");
1375         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1376                 return rte_flow_error_set(error, ENOTSUP,
1377                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1378                                           "RSS hash key must be exactly 40 bytes");
1379
1380         if (!hns3_rss_input_tuple_supported(hw, rss))
1381                 return rte_flow_error_set(error, EINVAL,
1382                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1383                                           &rss->types,
1384                                           "input RSS types are not supported");
1385
1386         act_index++;
1387
1388         /* Check if the next non-void action is END */
1389         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1390         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1391                 memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1392                 return rte_flow_error_set(error, EINVAL,
1393                                           RTE_FLOW_ERROR_TYPE_ACTION,
1394                                           act, "Not supported action.");
1395         }
1396
1397         return 0;
1398 }
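
/*
 * A minimal sketch (illustrative values only) of an action list this parser
 * accepts: a single RSS action followed by END, with queue ids below
 * hw->alloc_rss_size and a key of exactly HNS3_RSS_KEY_SIZE bytes.
 *
 *	static uint16_t queue[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.types = ETH_RSS_IP | ETH_RSS_TCP,
 *		.key_len = HNS3_RSS_KEY_SIZE,
 *		.key = hns3_hash_key,
 *		.queue_num = RTE_DIM(queue),
 *		.queue = queue,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */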
1399
1400 static int
1401 hns3_disable_rss(struct hns3_hw *hw)
1402 {
1403         int ret;
1404
1405         /* Redirect all entries of the redirection table to queue 0 */
1406         ret = hns3_rss_reset_indir_table(hw);
1407         if (ret)
1408                 return ret;
1409
1410         /* Disable RSS */
1411         hw->rss_info.conf.types = 0;
1412         hw->rss_dis_flag = true;
1413
1414         return 0;
1415 }
1416
1417 static void
1418 hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1419 {
1420         if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1421                 hns3_warn(hw, "Default RSS hash key will be used");
1422                 rss_conf->key = hns3_hash_key;
1423                 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1424         }
1425 }
1426
1427 static int
1428 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1429                          uint8_t *hash_algo)
1430 {
1431         enum rte_eth_hash_function algo_func = *func;
1432         switch (algo_func) {
1433         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1434                 /* Keep *hash_algo as what it used to be */
1435                 algo_func = hw->rss_info.conf.func;
1436                 break;
1437         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1438                 *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1439                 break;
1440         case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1441                 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1442                 break;
1443         case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1444                 *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1445                 break;
1446         default:
1447                 hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1448                          algo_func);
1449                 return -EINVAL;
1450         }
1451         *func = algo_func;
1452
1453         return 0;
1454 }
1455
1456 static int
1457 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1458 {
1459         struct hns3_rss_tuple_cfg *tuple;
1460         int ret;
1461
1462         hns3_parse_rss_key(hw, rss_config);
1463
1464         ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1465                                        &hw->rss_info.hash_algo);
1466         if (ret)
1467                 return ret;
1468
1469         ret = hns3_rss_set_algo_key(hw, rss_config->key);
1470         if (ret)
1471                 return ret;
1472
1473         hw->rss_info.conf.func = rss_config->func;
1474
1475         tuple = &hw->rss_info.rss_tuple_sets;
1476         ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1477         if (ret)
1478                 hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1479
1480         return ret;
1481 }
1482
1483 static int
1484 hns3_update_indir_table(struct rte_eth_dev *dev,
1485                         const struct rte_flow_action_rss *conf, uint16_t num)
1486 {
1487         struct hns3_adapter *hns = dev->data->dev_private;
1488         struct hns3_hw *hw = &hns->hw;
1489         uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1490         uint16_t j;
1491         uint32_t i;
1492
1493         /* Fill in redirection table */
1494         memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1495                sizeof(hw->rss_info.rss_indirection_tbl));
1496         for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1497                 j %= num;
1498                 if (conf->queue[j] >= hw->alloc_rss_size) {
1499                         hns3_err(hw, "queue id(%u) set to redirection table "
1500                                  "exceeds queue number(%u) allocated to a TC.",
1501                                  conf->queue[j], hw->alloc_rss_size);
1502                         return -EINVAL;
1503                 }
1504                 indir_tbl[i] = conf->queue[j];
1505         }
1506
1507         return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1508 }
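
/*
 * Worked example with illustrative values: given num == 3,
 * conf->queue == { 4, 5, 6 } and hw->rss_ind_tbl_size == 512, the queue ids
 * are repeated round-robin, so indir_tbl becomes { 4, 5, 6, 4, 5, 6, ... }
 * across all 512 entries.
 */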
1509
1510 static int
1511 hns3_config_rss_filter(struct rte_eth_dev *dev,
1512                        const struct hns3_rss_conf *conf, bool add)
1513 {
1514         struct hns3_process_private *process_list = dev->process_private;
1515         struct hns3_adapter *hns = dev->data->dev_private;
1516         struct hns3_rss_conf_ele *rss_filter_ptr;
1517         struct hns3_hw *hw = &hns->hw;
1518         struct hns3_rss_conf *rss_info;
1519         uint64_t flow_types;
1520         uint16_t num;
1521         int ret;
1522
1523         struct rte_flow_action_rss rss_flow_conf = {
1524                 .func = conf->conf.func,
1525                 .level = conf->conf.level,
1526                 .types = conf->conf.types,
1527                 .key_len = conf->conf.key_len,
1528                 .queue_num = conf->conf.queue_num,
1529                 .key = conf->conf.key_len ?
1530                     (void *)(uintptr_t)conf->conf.key : NULL,
1531                 .queue = conf->conf.queue,
1532         };
1533
1534         /* Filter the unsupported flow types */
1535         flow_types = conf->conf.types ?
1536                      rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1537                      hw->rss_info.conf.types;
1538         if (flow_types != rss_flow_conf.types)
1539                 hns3_warn(hw, "modified RSS types based on hardware support, "
1540                               "requested:%" PRIx64 " configured:%" PRIx64,
1541                           rss_flow_conf.types, flow_types);
1542         /* Update the useful flow types */
1543         rss_flow_conf.types = flow_types;
1544
1545         rss_info = &hw->rss_info;
1546         if (!add) {
1547                 if (!conf->valid)
1548                         return 0;
1549
1550                 ret = hns3_disable_rss(hw);
1551                 if (ret) {
1552                         hns3_err(hw, "RSS disable failed(%d)", ret);
1553                         return ret;
1554                 }
1555
1556                 if (rss_flow_conf.queue_num) {
1557                         /*
1558                          * Since the content of the queue pointer has been reset
1559                          * to 0, rss_info->conf.queue should be set to NULL.
1560                          */
1561                         rss_info->conf.queue = NULL;
1562                         rss_info->conf.queue_num = 0;
1563                 }
1564
1565                 /* set RSS func to invalid after the flush */
1566                 rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1567                 return 0;
1568         }
1569
1570         /* Set rx queues to use */
1571         num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1572         if (rss_flow_conf.queue_num > num)
1573                 hns3_warn(hw, "configured queue number %u exceeds the supported range, truncated to %u",
1574                           rss_flow_conf.queue_num, num);
1575         hns3_info(hw, "a maximum of %u contiguous PF queues are configured", num);
1576
1577         rte_spinlock_lock(&hw->lock);
1578         if (num) {
1579                 ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1580                 if (ret)
1581                         goto rss_config_err;
1582         }
1583
1584         /* Set hash algorithm and flow types by the user's config */
1585         ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1586         if (ret)
1587                 goto rss_config_err;
1588
1589         ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1590         if (ret) {
1591                 hns3_err(hw, "RSS config copy failed(%d)", ret);
1592                 goto rss_config_err;
1593         }
1594
1595         /*
1596          * When a new RSS rule is created, the old rule is overridden and
1597          * marked invalid.
1598          */
1599         TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
1600                 rss_filter_ptr->filter_info.valid = false;
1601
1602 rss_config_err:
1603         rte_spinlock_unlock(&hw->lock);
1604
1605         return ret;
1606 }
1607
1608 static int
1609 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1610 {
1611         struct hns3_process_private *process_list = dev->process_private;
1612         struct hns3_adapter *hns = dev->data->dev_private;
1613         struct hns3_rss_conf_ele *rss_filter_ptr;
1614         struct hns3_hw *hw = &hns->hw;
1615         int rss_rule_succ_cnt = 0; /* count of successfully cleared RSS rules */
1616         int rss_rule_fail_cnt = 0; /* count of RSS rules that failed to clear */
1617         int ret = 0;
1618
1619         rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1620         while (rss_filter_ptr) {
1621                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1622                              entries);
1623                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1624                                              false);
1625                 if (ret)
1626                         rss_rule_fail_cnt++;
1627                 else
1628                         rss_rule_succ_cnt++;
1629                 rte_free(rss_filter_ptr);
1630                 rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1631         }
1632
1633         if (rss_rule_fail_cnt) {
1634                 hns3_err(hw, "failed to delete all RSS filters, success num = %d "
1635                              "fail num = %d", rss_rule_succ_cnt,
1636                              rss_rule_fail_cnt);
1637                 ret = -EIO;
1638         }
1639
1640         return ret;
1641 }
1642
1643 int
1644 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1645 {
1646         struct hns3_adapter *hns = dev->data->dev_private;
1647         struct hns3_hw *hw = &hns->hw;
1648
1649         /* When the user flushes all rules, the RSS rule need not be restored */
1650         if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1651                 return 0;
1652
1653         return hns3_config_rss_filter(dev, &hw->rss_info, true);
1654 }
1655
1656 static int
1657 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1658                     const struct hns3_rss_conf *conf, bool add)
1659 {
1660         struct hns3_adapter *hns = dev->data->dev_private;
1661         struct hns3_hw *hw = &hns->hw;
1662         bool ret;
1663
1664         ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1665         if (ret) {
1666                 hns3_err(hw, "duplicate RSS configuration entered");
1667                 return -EINVAL;
1668         }
1669
1670         return hns3_config_rss_filter(dev, conf, add);
1671 }
1672
1673 static int
1674 hns3_flow_args_check(const struct rte_flow_attr *attr,
1675                      const struct rte_flow_item pattern[],
1676                      const struct rte_flow_action actions[],
1677                      struct rte_flow_error *error)
1678 {
1679         if (pattern == NULL)
1680                 return rte_flow_error_set(error, EINVAL,
1681                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1682                                           NULL, "NULL pattern.");
1683
1684         if (actions == NULL)
1685                 return rte_flow_error_set(error, EINVAL,
1686                                           RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1687                                           NULL, "NULL action.");
1688
1689         if (attr == NULL)
1690                 return rte_flow_error_set(error, EINVAL,
1691                                           RTE_FLOW_ERROR_TYPE_ATTR,
1692                                           NULL, "NULL attribute.");
1693
1694         return hns3_check_attr(attr, error);
1695 }
1696
1697 /*
1698  * Check whether the flow rule is supported by hns3.
1699  * Only the format is checked; there is no guarantee the rule can be
1700  * programmed into the HW, because there may not be enough room for it.
1701  */
1702 static int
1703 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1704                    const struct rte_flow_item pattern[],
1705                    const struct rte_flow_action actions[],
1706                    struct rte_flow_error *error)
1707 {
1708         struct hns3_fdir_rule fdir_rule;
1709         int ret;
1710
1711         ret = hns3_flow_args_check(attr, pattern, actions, error);
1712         if (ret)
1713                 return ret;
1714
1715         if (hns3_find_rss_general_action(pattern, actions))
1716                 return hns3_parse_rss_filter(dev, actions, error);
1717
1718         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1719         return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1720 }
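
/*
 * From the application side this is reached through the generic flow API,
 * e.g. (sketch only):
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */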
1721
1722 /*
1723  * Create or destroy a flow rule.
1724  * Theoretically, one rule can match more than one filter.
1725  * We will let it use the first filter it hits.
1726  * So, the sequence matters.
1727  */
1728 static struct rte_flow *
1729 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1730                  const struct rte_flow_item pattern[],
1731                  const struct rte_flow_action actions[],
1732                  struct rte_flow_error *error)
1733 {
1734         struct hns3_process_private *process_list = dev->process_private;
1735         struct hns3_adapter *hns = dev->data->dev_private;
1736         struct hns3_hw *hw = &hns->hw;
1737         const struct hns3_rss_conf *rss_conf;
1738         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1739         struct hns3_rss_conf_ele *rss_filter_ptr;
1740         struct hns3_flow_mem *flow_node;
1741         const struct rte_flow_action *act;
1742         struct rte_flow *flow;
1743         struct hns3_fdir_rule fdir_rule;
1744         int ret;
1745
1746         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1747         if (ret)
1748                 return NULL;
1749
1750         flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1751         if (flow == NULL) {
1752                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1753                                    NULL, "Failed to allocate flow memory");
1754                 return NULL;
1755         }
1756         flow_node = rte_zmalloc("hns3 flow node",
1757                                 sizeof(struct hns3_flow_mem), 0);
1758         if (flow_node == NULL) {
1759                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1760                                    NULL, "Failed to allocate flow list memory");
1761                 rte_free(flow);
1762                 return NULL;
1763         }
1764
1765         flow_node->flow = flow;
1766         TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
1767
1768         act = hns3_find_rss_general_action(pattern, actions);
1769         if (act) {
1770                 rss_conf = act->conf;
1771
1772                 ret = hns3_flow_parse_rss(dev, rss_conf, true);
1773                 if (ret)
1774                         goto err;
1775
1776                 rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1777                                              sizeof(struct hns3_rss_conf_ele),
1778                                              0);
1779                 if (rss_filter_ptr == NULL) {
1780                         hns3_err(hw,
1781                                  "Failed to allocate hns3_rss_filter memory");
1782                         ret = -ENOMEM;
1783                         goto err;
1784                 }
1785                 hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1786                                    &rss_conf->conf);
1787                 rss_filter_ptr->filter_info.valid = true;
1788                 TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
1789                                   rss_filter_ptr, entries);
1790
1791                 flow->rule = rss_filter_ptr;
1792                 flow->filter_type = RTE_ETH_FILTER_HASH;
1793                 return flow;
1794         }
1795
1796         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1797         ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1798         if (ret)
1799                 goto out;
1800
1801         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1802                 ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1803                                        fdir_rule.act_cnt.id, error);
1804                 if (ret)
1805                         goto out;
1806
1807                 flow->counter_id = fdir_rule.act_cnt.id;
1808         }
1809
1810         fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1811                                     sizeof(struct hns3_fdir_rule_ele),
1812                                     0);
1813         if (fdir_rule_ptr == NULL) {
1814                 hns3_err(hw, "failed to allocate fdir_rule memory.");
1815                 ret = -ENOMEM;
1816                 goto err_fdir;
1817         }
1818
1819         ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1820         if (!ret) {
1821                 memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1822                         sizeof(struct hns3_fdir_rule));
1823                 TAILQ_INSERT_TAIL(&process_list->fdir_list,
1824                                   fdir_rule_ptr, entries);
1825                 flow->rule = fdir_rule_ptr;
1826                 flow->filter_type = RTE_ETH_FILTER_FDIR;
1827
1828                 return flow;
1829         }
1830
1831         rte_free(fdir_rule_ptr);
1832 err_fdir:
1833         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1834                 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1835 err:
1836         rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1837                            "Failed to create flow");
1838 out:
1839         TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1840         rte_free(flow_node);
1841         rte_free(flow);
1842         return NULL;
1843 }
1844
1845 /* Destroy a flow rule on hns3. */
1846 static int
1847 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1848                   struct rte_flow_error *error)
1849 {
1850         struct hns3_process_private *process_list = dev->process_private;
1851         struct hns3_adapter *hns = dev->data->dev_private;
1852         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1853         struct hns3_rss_conf_ele *rss_filter_ptr;
1854         struct hns3_flow_mem *flow_node;
1855         enum rte_filter_type filter_type;
1856         struct hns3_fdir_rule fdir_rule;
1857         int ret;
1858
1859         if (flow == NULL)
1860                 return rte_flow_error_set(error, EINVAL,
1861                                           RTE_FLOW_ERROR_TYPE_HANDLE,
1862                                           flow, "Flow is NULL");
1863
1864         filter_type = flow->filter_type;
1865         switch (filter_type) {
1866         case RTE_ETH_FILTER_FDIR:
1867                 fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1868                 memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1869                        sizeof(struct hns3_fdir_rule));
1870
1871                 ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1872                 if (ret)
1873                         return rte_flow_error_set(error, EIO,
1874                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1875                                                   flow,
1876                                                   "Destroy FDIR fail. Try again");
1877                 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1878                         hns3_counter_release(dev, fdir_rule.act_cnt.id);
1879                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1880                 rte_free(fdir_rule_ptr);
1881                 fdir_rule_ptr = NULL;
1882                 break;
1883         case RTE_ETH_FILTER_HASH:
1884                 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1885                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1886                                              false);
1887                 if (ret)
1888                         return rte_flow_error_set(error, EIO,
1889                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1890                                                   flow,
1891                                                   "Destroy RSS fail. Try again");
1892                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1893                              entries);
1894                 rte_free(rss_filter_ptr);
1895                 rss_filter_ptr = NULL;
1896                 break;
1897         default:
1898                 return rte_flow_error_set(error, EINVAL,
1899                                           RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1900                                           "Unsupported filter type");
1901         }
1902
1903         TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
1904                 if (flow_node->flow == flow) {
1905                         TAILQ_REMOVE(&process_list->flow_list, flow_node,
1906                                      entries);
1907                         rte_free(flow_node);
1908                         flow_node = NULL;
1909                         break;
1910                 }
1911         }
1912         rte_free(flow);
1913         flow = NULL;
1914
1915         return 0;
1916 }
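
/*
 * Applications reach this through rte_flow_destroy(port_id, flow, &err)
 * (sketch only); the flow pointer must be one previously returned by
 * rte_flow_create() on the same port.
 */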
1917
1918 /* Destroy all flow rules associated with a port on hns3. */
1919 static int
1920 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1921 {
1922         struct hns3_adapter *hns = dev->data->dev_private;
1923         int ret;
1924
1925         /* FDIR is available only in PF driver */
1926         if (!hns->is_vf) {
1927                 ret = hns3_clear_all_fdir_filter(hns);
1928                 if (ret) {
1929                         rte_flow_error_set(error, -ret,
1930                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1931                                            NULL, "Failed to flush rule");
1932                         return ret;
1933                 }
1934                 hns3_counter_flush(dev);
1935         }
1936
1937         ret = hns3_clear_rss_filter(dev);
1938         if (ret) {
1939                 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1940                                    NULL, "Failed to flush rss filter");
1941                 return ret;
1942         }
1943
1944         hns3_filterlist_flush(dev);
1945
1946         return 0;
1947 }
1948
1949 /* Query an existing flow rule. */
1950 static int
1951 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1952                 const struct rte_flow_action *actions, void *data,
1953                 struct rte_flow_error *error)
1954 {
1955         struct rte_flow_action_rss *rss_conf;
1956         struct hns3_rss_conf_ele *rss_rule;
1957         struct rte_flow_query_count *qc;
1958         int ret;
1959
1960         if (!flow->rule)
1961                 return rte_flow_error_set(error, EINVAL,
1962                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
1963
1964         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1965                 switch (actions->type) {
1966                 case RTE_FLOW_ACTION_TYPE_VOID:
1967                         break;
1968                 case RTE_FLOW_ACTION_TYPE_COUNT:
1969                         qc = (struct rte_flow_query_count *)data;
1970                         ret = hns3_counter_query(dev, flow, qc, error);
1971                         if (ret)
1972                                 return ret;
1973                         break;
1974                 case RTE_FLOW_ACTION_TYPE_RSS:
1975                         if (flow->filter_type != RTE_ETH_FILTER_HASH) {
1976                                 return rte_flow_error_set(error, ENOTSUP,
1977                                         RTE_FLOW_ERROR_TYPE_ACTION,
1978                                         actions, "action is not supported");
1979                         }
1980                         rss_conf = (struct rte_flow_action_rss *)data;
1981                         rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
1982                         rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
1983                                    sizeof(struct rte_flow_action_rss));
1984                         break;
1985                 default:
1986                         return rte_flow_error_set(error, ENOTSUP,
1987                                 RTE_FLOW_ERROR_TYPE_ACTION,
1988                                 actions, "action is not supported");
1989                 }
1990         }
1991
1992         return 0;
1993 }
1994
1995 static const struct rte_flow_ops hns3_flow_ops = {
1996         .validate = hns3_flow_validate,
1997         .create = hns3_flow_create,
1998         .destroy = hns3_flow_destroy,
1999         .flush = hns3_flow_flush,
2000         .query = hns3_flow_query,
2001         .isolate = NULL,
2002 };
2003
2004 int
2005 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2006                       const struct rte_flow_ops **ops)
2007 {
2008         struct hns3_hw *hw;
2009
2010         hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2011         if (hw->adapter_state >= HNS3_NIC_CLOSED)
2012                 return -ENODEV;
2013
2014         *ops = &hns3_flow_ops;
2015         return 0;
2016 }