net/hns3: rename RSS functions
[dpdk.git] / drivers/net/hns3/hns3_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"

/* Default hash keys */
static uint8_t hns3_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special filter ID for non-specific packet flagging. Don't change this value. */
#define HNS3_MAX_FILTER_ID      0x0FFF

#define ETHER_TYPE_MASK         0xFFFF
#define IPPROTO_MASK            0xFF
#define TUNNEL_TYPE_MASK        0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE          0x6558

static enum rte_flow_item_type first_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type L2_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type tunnel_next_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN
};

struct items_step_mngr {
        enum rte_flow_item_type *items;
        int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = rte_be_to_cpu_32(src[i]);
}

/*
 * This function is used to find the RSS general action.
 * 1. RSS is used to spread packets among several queues; the flow API
 *    provides struct rte_flow_action_rss, whose fields (func/level/types/
 *    key/queue) let the user control the RSS function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create one
 *    FDIR rule whose action is an RSS queue region.
 * 3. When the action is RSS, the following rule distinguishes the two:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, which
 *            indicates a queue region configuration.
 *    Case other: an RSS general action.
 */
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[])
{
        const struct rte_flow_action *act = NULL;
        const struct hns3_rss_conf *rss;
        bool have_eth = false;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        act = actions;
                        break;
                }
        }
        if (!act)
                return NULL;

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        have_eth = true;
                        break;
                }
        }

        rss = act->conf;
        if (have_eth && rss->conf.queue_num) {
                /*
                 * The pattern has ETH and the action's queue_num > 0,
                 * indicating a queue region configuration.
                 * Because queue region is implemented by FDIR + RSS in hns3
                 * hardware, it needs to enter the FDIR process, so return
                 * NULL here to avoid entering the RSS process.
                 */
                return NULL;
        }

        return act;
}
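
/*
 * Illustrative sketch (guarded out, not part of the driver): a flow whose
 * pattern contains ETH and whose RSS action carries queue_num > 0 is
 * classified above as a queue region request and therefore handled by the
 * FDIR path, not the general RSS path. All values below are hypothetical.
 */
#if 0
static const struct rte_flow_item example_region_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* makes have_eth true */
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const uint16_t example_region_queue_ids[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_region_rss = {
        .queue_num = 4,                         /* > 0: queue region, not RSS */
        .queue = example_region_queue_ids,
};
static const struct rte_flow_action example_region_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &example_region_rss },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
/* hns3_find_rss_general_action(example_region_pattern,
 *                              example_region_actions) returns NULL. */
#endif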

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt;

        LIST_FOREACH(cnt, &pf->flow_counters, next) {
                if (cnt->id == id)
                        return cnt;
        }
        return NULL;
}

static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
                 struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt) {
                if (!cnt->shared || cnt->shared != shared)
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                cnt,
                                "Counter id is used, shared flag not match");
                cnt->ref_cnt++;
                return 0;
        }

        cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
        if (cnt == NULL)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
                                          "Alloc mem for counter failed");
        cnt->id = id;
        cnt->shared = shared;
        cnt->ref_cnt = 1;
        cnt->hits = 0;
        LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
        return 0;
}

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_query_count *qc,
                   struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir is not supported in VF");
        cnt = hns3_counter_lookup(dev, flow->counter_id);
        if (cnt == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Can't find counter id");

        ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
        if (ret) {
                rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Read counter fail.");
                return ret;
        }
        qc->hits_set = 1;
        qc->hits = value;

        return 0;
}

static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt == NULL) {
                hns3_err(hw, "Can't find available counter to release");
                return -EINVAL;
        }
        cnt->ref_cnt--;
        if (cnt->ref_cnt == 0) {
                LIST_REMOVE(cnt, next);
                rte_free(cnt);
        }
        return 0;
}

static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt_ptr;

        cnt_ptr = LIST_FIRST(&pf->flow_counters);
        while (cnt_ptr) {
                LIST_REMOVE(cnt_ptr, next);
                rte_free(cnt_ptr);
                cnt_ptr = LIST_FIRST(&pf->flow_counters);
        }
}
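
/*
 * Illustrative sketch (guarded out): how an application reads a counter
 * attached to a flow with RTE_FLOW_ACTION_TYPE_COUNT; the request is
 * serviced by hns3_counter_query() above. Port 0 and "flow" are
 * hypothetical; printing would additionally need <stdio.h>/<inttypes.h>.
 */
#if 0
static int
example_read_counter(struct rte_flow *flow)
{
        static const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count qc = { .reset = 0 };
        struct rte_flow_error err;
        int ret;

        ret = rte_flow_query(0, flow, &count_action, &qc, &err);
        if (ret == 0 && qc.hits_set)
                printf("flow matched %" PRIu64 " packets\n", qc.hits);
        return ret;
}
#endif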

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
                         const struct rte_flow_action *action,
                         struct hns3_fdir_rule *rule,
                         struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_queue *queue;
        struct hns3_hw *hw = &hns->hw;

        queue = (const struct rte_flow_action_queue *)action->conf;
        if (queue->index >= hw->used_rx_queues) {
                hns3_err(hw, "queue ID(%u) is greater than number of "
                          "available queues (%u) in driver.",
                          queue->index, hw->used_rx_queues);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          action, "Invalid queue ID in PF");
        }

        rule->queue_id = queue->index;
        rule->nb_queues = 1;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}

static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct hns3_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_rss *conf = action->conf;
        struct hns3_hw *hw = &hns->hw;
        uint16_t idx;

        if (!hns3_dev_fd_queue_region_supported(hw))
                return rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                        "Not support config queue region!");

        if ((!rte_is_power_of_2(conf->queue_num)) ||
                conf->queue_num > hw->rss_size_max ||
                conf->queue[0] >= hw->used_rx_queues ||
                conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
                return rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                        "Invalid start queue ID and queue num! the start "
                        "queue ID must be valid, the queue num must be a "
                        "power of 2 and <= rss_size_max.");
        }

        for (idx = 1; idx < conf->queue_num; idx++) {
                if (conf->queue[idx] != conf->queue[idx - 1] + 1)
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                                "Invalid queue ID sequence! the queue IDs "
                                "must be continuously increasing.");
        }

        rule->queue_id = conf->queue[0];
        rule->nb_queues = conf->queue_num;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}
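
/*
 * Illustrative sketch (guarded out): a queue region that satisfies the
 * checks above -- the queue count is a power of two and the queue IDs
 * increase contiguously from the start queue. The IDs are hypothetical.
 */
#if 0
static const uint16_t example_contiguous_queues[] = { 8, 9, 10, 11 };
static const struct rte_flow_action_rss example_queue_region = {
        .queue_num = RTE_DIM(example_contiguous_queues), /* power of 2 */
        .queue = example_contiguous_queues,              /* contiguous IDs */
};
#endif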

/*
 * Parse the actions structure from the provided actions.
 * The actions are validated as they are parsed.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
                    const struct rte_flow_action actions[],
                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_count *act_count;
        const struct rte_flow_action_mark *mark;
        struct hns3_pf *pf = &hns->pf;
        uint32_t counter_num;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = hns3_handle_action_queue(dev, actions, rule,
                                                       error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule->action = HNS3_FD_ACTION_DROP_PACKET;
                        break;
                /*
                 * Here RSS's real action is a queue region.
                 * Queue region is implemented by FDIR + RSS in hns3 hardware:
                 * the FDIR action is one queue region (start_queue_id and
                 * queue_num), then RSS spreads packets over the queue region
                 * by the RSS algorithm.
                 */
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = hns3_handle_action_queue_region(dev, actions,
                                                              rule, error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark =
                            (const struct rte_flow_action_mark *)actions->conf;
                        if (mark->id >= HNS3_MAX_FILTER_ID)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid Mark ID");
                        rule->fd_id = mark->id;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        rule->fd_id = HNS3_MAX_FILTER_ID;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        act_count =
                            (const struct rte_flow_action_count *)actions->conf;
                        counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
                        if (act_count->id >= counter_num)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid counter id");
                        rule->act_cnt = *act_count;
                        rule->flags |= HNS3_RULE_FLAG_COUNTER;
                        break;
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "Unsupported action");
                }
        }

        return 0;
}

/* Check the attributes of a flow director rule. */
static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
        if (!attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          attr, "Ingress can't be zero");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          attr, "Not support egress");
        if (attr->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          attr, "No support for transfer");
        if (attr->priority)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          attr, "Not support priority");
        if (attr->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          attr, "Not support group");
        return 0;
}

static int
hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error __rte_unused)
{
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                eth_mask = item->mask;
                if (eth_mask->type) {
                        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
                        rule->key_conf.mask.ether_type =
                            rte_be_to_cpu_16(eth_mask->type);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->src)) {
                        hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
                        memcpy(rule->key_conf.mask.src_mac,
                               eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
                        hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
                        memcpy(rule->key_conf.mask.dst_mac,
                               eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
        }

        eth_spec = item->spec;
        rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
        memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;

        rule->key_conf.vlan_num++;
        if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Vlan_num is more than 2");

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                vlan_mask = item->mask;
                if (vlan_mask->tci) {
                        if (rule->key_conf.vlan_num == 1) {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
                                             1);
                                rule->key_conf.mask.vlan_tag1 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        } else {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
                                             1);
                                rule->key_conf.mask.vlan_tag2 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        }
                }
        }

        vlan_spec = item->spec;
        if (rule->key_conf.vlan_num == 1)
                rule->key_conf.spec.vlan_tag1 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        else
                rule->key_conf.spec.vlan_tag2 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        return 0;
}

static bool
hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
{
        if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum)
                return false;

        return true;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv4_mask = item->mask;
                if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip, tos, proto in IPV4");
                }

                if (ipv4_mask->hdr.src_addr) {
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                        rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
                }

                if (ipv4_mask->hdr.dst_addr) {
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                        rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
                }

                if (ipv4_mask->hdr.type_of_service) {
                        hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
                        rule->key_conf.mask.ip_tos =
                            ipv4_mask->hdr.type_of_service;
                }

                if (ipv4_mask->hdr.next_proto_id) {
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
                        rule->key_conf.mask.ip_proto =
                            ipv4_mask->hdr.next_proto_id;
                }
        }

        ipv4_spec = item->spec;
        rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
        rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
        rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
        rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
        return 0;
}
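
/*
 * Illustrative sketch (guarded out): an IPv4 item accepted by
 * hns3_check_ipv4_mask_supported() above -- only the src/dst address, TOS
 * and next-protocol fields may be masked. The addresses are hypothetical.
 */
#if 0
static const struct rte_flow_item_ipv4 example_ipv4_spec = {
        .hdr = {
                .src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
                .dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
        },
};
static const struct rte_flow_item_ipv4 example_ipv4_mask = {
        .hdr = {
                .src_addr = RTE_BE32(UINT32_MAX),
                .dst_addr = RTE_BE32(UINT32_MAX),
        },
};
static const struct rte_flow_item example_ipv4_item = {
        .type = RTE_FLOW_ITEM_TYPE_IPV4,
        .spec = &example_ipv4_spec,
        .mask = &example_ipv4_mask,
};
#endif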

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv6_mask = item->mask;
                if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.hop_limits) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip, proto in IPV6");
                }
                net_addr_to_host(rule->key_conf.mask.src_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
                                 IP_ADDR_LEN);
                net_addr_to_host(rule->key_conf.mask.dst_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
                                 IP_ADDR_LEN);
                rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
                if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                if (ipv6_mask->hdr.proto)
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        }

        ipv6_spec = item->spec;
        net_addr_to_host(rule->key_conf.spec.src_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.src_addr,
                         IP_ADDR_LEN);
        net_addr_to_host(rule->key_conf.spec.dst_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
                         IP_ADDR_LEN);
        rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

        return 0;
}

static bool
hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
{
        if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp)
                return false;

        return true;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_TCP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                tcp_mask = item->mask;
                if (!hns3_check_tcp_mask_supported(tcp_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in TCP");
                }

                if (tcp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.src_port);
                }
                if (tcp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
                }
        }

        tcp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_UDP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                udp_mask = item->mask;
                if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in UDP");
                }
                if (udp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(udp_mask->hdr.src_port);
                }
                if (udp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(udp_mask->hdr.dst_port);
                }
        }

        udp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                sctp_mask = item->mask;
                if (sctp_mask->hdr.cksum)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in SCTP");
                if (sctp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.src_port);
                }
                if (sctp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
                }
                if (sctp_mask->hdr.tag) {
                        hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
                        rule->key_conf.mask.sctp_tag =
                            rte_be_to_cpu_32(sctp_mask->hdr.tag);
                }
        }

        sctp_spec = item->spec;
        rule->key_conf.spec.src_port =
            rte_be_to_cpu_16(sctp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port =
            rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
        rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

        return 0;
}

/*
 * Check items before the tunnel item, save the inner configs to the outer
 * configs, and clear the inner configs.
 * The key consists of two parts: meta_data and tuple keys.
 * Meta data uses 15 bits, including vlan_num (2 bits), des_port (12 bits)
 * and tunnel packet (1 bit).
 * Tuple keys use 384 bits, including ot_dst-mac (48 bits), ot_dst-port
 * (16 bits), ot_tun_vni (24 bits), ot_flow_id (8 bits), src-mac (48 bits),
 * dst-mac (48 bits), src-ip (32/128 bits), dst-ip (32/128 bits), src-port
 * (16 bits), dst-port (16 bits), tos (8 bits), ether-proto (16 bits),
 * ip-proto (8 bits), vlantag1 (16 bits), vlantag2 (16 bits) and sctp-tag
 * (32 bits).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
                   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        /* check eth config */
        if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer eth mac is unsupported");
        if (rule->input_set & BIT(INNER_ETH_TYPE)) {
                hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
                rule->key_conf.spec.outer_ether_type =
                    rule->key_conf.spec.ether_type;
                rule->key_conf.mask.outer_ether_type =
                    rule->key_conf.mask.ether_type;
                hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
                rule->key_conf.spec.ether_type = 0;
                rule->key_conf.mask.ether_type = 0;
        }

        /* check vlan config */
        if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Outer vlan tags is unsupported");

        /* clear vlan_num for inner vlan select */
        rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
        rule->key_conf.vlan_num = 0;

        /* check L3 config */
        if (rule->input_set &
            (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer ip is unsupported");
        if (rule->input_set & BIT(INNER_IP_PROTO)) {
                hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
                rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
                rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
                hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
                rule->key_conf.spec.ip_proto = 0;
                rule->key_conf.mask.ip_proto = 0;
        }

        /* check L4 config */
        if (rule->input_set & BIT(INNER_SCTP_TAG))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Outer sctp tag is unsupported");

        if (rule->input_set & BIT(INNER_SRC_PORT)) {
                hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
                rule->key_conf.spec.outer_src_port =
                    rule->key_conf.spec.src_port;
                rule->key_conf.mask.outer_src_port =
                    rule->key_conf.mask.src_port;
                hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
                rule->key_conf.spec.src_port = 0;
                rule->key_conf.mask.src_port = 0;
        }
        if (rule->input_set & BIT(INNER_DST_PORT)) {
                hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
                rule->key_conf.spec.dst_port = 0;
                rule->key_conf.mask.dst_port = 0;
        }
        return 0;
}
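
/*
 * Illustrative sketch (guarded out): in a tunnel pattern, items that appear
 * before the tunnel item describe the outer headers, so their parsed fields
 * are migrated to the outer key by hns3_handle_tunnel() above. The pattern
 * below is hypothetical.
 */
#if 0
static const struct rte_flow_item example_tunnel_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* becomes outer L2 */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },    /* becomes outer L3 */
        { .type = RTE_FLOW_ITEM_TYPE_UDP },     /* becomes outer L4 */
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },   /* triggers hns3_handle_tunnel() */
        { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* inner L2 */
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif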

static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
        else
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        vxlan_mask = item->mask;
        vxlan_spec = item->spec;

        if (vxlan_mask->flags)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Flags is not supported in VxLAN");

        /* VNI must be totally masked or not. */
        if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in VxLAN");
        if (vxlan_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
                   VNI_OR_TNI_LEN);
        return 0;
}
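
/*
 * Illustrative sketch (guarded out): a VXLAN item accepted by the parser
 * above -- the 24-bit VNI mask must be all-ones or all-zero. The VNI value
 * is hypothetical.
 */
#if 0
static const struct rte_flow_item_vxlan example_vxlan_spec = {
        .vni = { 0x12, 0x34, 0x56 },
};
static const struct rte_flow_item_vxlan example_vxlan_mask = {
        .vni = { 0xFF, 0xFF, 0xFF },    /* totally masked */
};
#endif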

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;

        hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
        rule->key_conf.spec.outer_proto = IPPROTO_GRE;
        rule->key_conf.mask.outer_proto = IPPROTO_MASK;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
        rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        nvgre_mask = item->mask;
        nvgre_spec = item->spec;

        if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in NVGRE");

        /* TNI must be totally masked or not. */
        if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "TNI must be totally masked or not in NVGRE");

        if (nvgre_mask->tni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
                   VNI_OR_TNI_LEN);

        if (nvgre_mask->flow_id) {
                hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
                rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
        }
        rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
        return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        geneve_mask = item->mask;
        geneve_spec = item->spec;

        if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in GENEVE");
        /* VNI must be totally masked or not. */
        if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in GENEVE");
        if (geneve_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
                           VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
                   VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");
        else if (item->spec && (item->mask == NULL))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Tunnel packets must configure "
                                          "with mask");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                ret = hns3_parse_vxlan(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                ret = hns3_parse_nvgre(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_GENEVE:
                ret = hns3_parse_geneve(item, rule, error);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported tunnel type!");
        }
        if (ret)
                return ret;
        return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct items_step_mngr *step_mngr,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                ret = hns3_parse_eth(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = ARRAY_SIZE(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                ret = hns3_parse_vlan(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = ARRAY_SIZE(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                ret = hns3_parse_ipv4(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = ARRAY_SIZE(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                ret = hns3_parse_ipv6(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = ARRAY_SIZE(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                ret = hns3_parse_tcp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = ARRAY_SIZE(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                ret = hns3_parse_udp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = ARRAY_SIZE(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
                ret = hns3_parse_sctp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = ARRAY_SIZE(L4_next_items);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported normal type!");
        }

        return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
                   struct items_step_mngr step_mngr,
                   struct rte_flow_error *error)
{
        int i;

        if (item->last)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
                                          "Not supported last point for range");

        for (i = 0; i < step_mngr.count; i++) {
                if (item->type == step_mngr.items[i])
                        break;
        }

        if (i == step_mngr.count) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Invalid or missing item");
        }
        return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
        if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
            type == RTE_FLOW_ITEM_TYPE_VXLAN ||
            type == RTE_FLOW_ITEM_TYPE_NVGRE ||
            type == RTE_FLOW_ITEM_TYPE_GENEVE ||
            type == RTE_FLOW_ITEM_TYPE_MPLS)
                return true;
        return false;
}

/*
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule, and
 * fill in the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 * UDP/TCP/SCTP src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM         Spec                    Mask
 * ETH          dst_addr
 *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
 *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
 * MAC VLAN     tci     0x2016          0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct hns3_fdir_rule *rule,
                       struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_item *item;
        struct items_step_mngr step_mngr;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir not supported in VF");

        step_mngr.items = first_items;
        step_mngr.count = ARRAY_SIZE(first_items);
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                ret = hns3_validate_item(item, step_mngr, error);
                if (ret)
                        return ret;

                if (is_tunnel_packet(item->type)) {
                        ret = hns3_parse_tunnel(item, rule, error);
                        if (ret)
                                return ret;
                        step_mngr.items = tunnel_next_items;
                        step_mngr.count = ARRAY_SIZE(tunnel_next_items);
                } else {
                        ret = hns3_parse_normal(item, rule, &step_mngr, error);
                        if (ret)
                                return ret;
                }
        }

        return hns3_handle_actions(dev, actions, rule, error);
}
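
/*
 * Illustrative sketch (guarded out): creating the UDP rule from the pattern
 * documented above through the generic flow API; port 0 and queue 3 are
 * hypothetical.
 */
#if 0
static struct rte_flow *
example_create_udp_rule(void)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_udp udp_spec = {
                .hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = { .src_port = RTE_BE16(0xFFFF),
                         .dst_port = RTE_BE16(0xFFFF) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(0, &attr, pattern, actions, &err);
}
#endif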
1208
1209 void
1210 hns3_filterlist_init(struct rte_eth_dev *dev)
1211 {
1212         struct hns3_process_private *process_list = dev->process_private;
1213
1214         TAILQ_INIT(&process_list->fdir_list);
1215         TAILQ_INIT(&process_list->filter_rss_list);
1216         TAILQ_INIT(&process_list->flow_list);
1217 }
1218
1219 static void
1220 hns3_filterlist_flush(struct rte_eth_dev *dev)
1221 {
1222         struct hns3_process_private *process_list = dev->process_private;
1223         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1224         struct hns3_rss_conf_ele *rss_filter_ptr;
1225         struct hns3_flow_mem *flow_node;
1226
1227         fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1228         while (fdir_rule_ptr) {
1229                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1230                 rte_free(fdir_rule_ptr);
1231                 fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1232         }
1233
1234         rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1235         while (rss_filter_ptr) {
1236                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1237                              entries);
1238                 rte_free(rss_filter_ptr);
1239                 rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1240         }
1241
1242         flow_node = TAILQ_FIRST(&process_list->flow_list);
1243         while (flow_node) {
1244                 TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1245                 rte_free(flow_node->flow);
1246                 rte_free(flow_node);
1247                 flow_node = TAILQ_FIRST(&process_list->flow_list);
1248         }
1249 }
1250
1251 static bool
1252 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1253                      const struct rte_flow_action_rss *with)
1254 {
1255         bool func_is_same;
1256
1257         /*
1258          * When user flush all RSS rule, RSS func is set invalid with
1259          * RTE_ETH_HASH_FUNCTION_MAX. Then the user create a flow after
1260          * flushed, any validate RSS func is different with it before
1261          * flushed. Others, when user create an action RSS with RSS func
1262          * specified RTE_ETH_HASH_FUNCTION_DEFAULT, the func is the same
1263          * between continuous RSS flow.
1264          */
1265         if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1266                 func_is_same = false;
1267         else
1268                 func_is_same = (with->func ? (comp->func == with->func) : true);
1269
1270         return (func_is_same &&
1271                 comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1272                 comp->level == with->level && comp->key_len == with->key_len &&
1273                 comp->queue_num == with->queue_num &&
1274                 !memcmp(comp->key, with->key, with->key_len) &&
1275                 !memcmp(comp->queue, with->queue,
1276                         sizeof(*with->queue) * with->queue_num));
1277 }
1278
1279 static int
1280 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1281                    const struct rte_flow_action_rss *in)
1282 {
1283         if (in->key_len > RTE_DIM(out->key) ||
1284             in->queue_num > RTE_DIM(out->queue))
1285                 return -EINVAL;
1286         if (in->key == NULL && in->key_len)
1287                 return -EINVAL;
1288         out->conf = (struct rte_flow_action_rss) {
1289                 .func = in->func,
1290                 .level = in->level,
1291                 .types = in->types,
1292                 .key_len = in->key_len,
1293                 .queue_num = in->queue_num,
1294         };
1295         out->conf.queue = memcpy(out->queue, in->queue,
1296                                 sizeof(*in->queue) * in->queue_num);
1297         if (in->key)
1298                 out->conf.key = memcpy(out->key, in->key, in->key_len);
1299
1300         return 0;
1301 }
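
/*
 * Illustrative sketch (not part of the driver): hns3_rss_conf_copy() is a
 * deep copy. On success, out->conf.key points into out->key and
 * out->conf.queue points into out->queue, so the stored rule stays valid
 * even after the caller's rte_flow_action_rss buffers are released:
 *
 *     struct hns3_rss_conf saved;
 *
 *     if (hns3_rss_conf_copy(&saved, &act_rss) == 0)
 *             ... act_rss.key and act_rss.queue may now be freed ...
 */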
1302
1303 static bool
1304 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1305                                const struct rte_flow_action_rss *rss)
1306 {
1307         /*
1308          * For IP packets, using the src/dst port fields in the RSS hash is
1309          * not supported for the following packet types:
1310          * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1311          * Besides, on Kunpeng920 the NIC HW cannot use the src/dst port
1312          * fields in the RSS hash for the IPv6 SCTP packet type, whereas
1313          * Kunpeng930 and later Kunpeng series do support using the src/dst
1314          * port fields in the RSS hash for IPv6 SCTP.
1315          */
1316         if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
1317             (rss->types & ETH_RSS_IP ||
1318             (!hw->rss_info.ipv6_sctp_offload_supported &&
1319             rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
1320                 return false;
1321
1322         return true;
1323 }
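
/*
 * Illustrative sketch (not part of the driver): a types combination the
 * check above rejects, since L4-port-only hashing cannot be combined with
 * plain IP packet types:
 *
 *     rss.types = ETH_RSS_IP | ETH_RSS_L4_SRC_ONLY;
 *     hns3_rss_input_tuple_supported(hw, &rss);   returns false
 */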
1324
1325 /*
1326  * This function is used to parse and validate the RSS action.
1327  */
1328 static int
1329 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1330                       const struct rte_flow_action *actions,
1331                       struct rte_flow_error *error)
1332 {
1333         struct hns3_adapter *hns = dev->data->dev_private;
1334         struct hns3_hw *hw = &hns->hw;
1335         struct hns3_rss_conf *rss_conf = &hw->rss_info;
1336         const struct rte_flow_action_rss *rss;
1337         const struct rte_flow_action *act;
1338         uint32_t act_index = 0;
1339         uint16_t n;
1340
1341         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1342         rss = act->conf;
1343
1344         if (rss == NULL) {
1345                 return rte_flow_error_set(error, EINVAL,
1346                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1347                                           act, "RSS action configuration is NULL");
1348         }
1349
1350         if (rss->queue_num > RTE_DIM(rss_conf->queue))
1351                 return rte_flow_error_set(error, ENOTSUP,
1352                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1353                                           "configured queue number exceeds "
1354                                           "the queue buffer size supported by the driver");
1355
1356         for (n = 0; n < rss->queue_num; n++) {
1357                 if (rss->queue[n] < hw->alloc_rss_size)
1358                         continue;
1359                 return rte_flow_error_set(error, EINVAL,
1360                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1361                                           "queue id must be less than the number of queues allocated to a TC");
1362         }
1363
1364         if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1365                 return rte_flow_error_set(error, EINVAL,
1366                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1367                                           act,
1368                                           "flow types are not supported by "
1369                                           "hns3's RSS");
1370         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1371                 return rte_flow_error_set(error, ENOTSUP,
1372                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1373                                           "RSS hash function is not supported");
1374         if (rss->level)
1375                 return rte_flow_error_set(error, ENOTSUP,
1376                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1377                                           "a nonzero RSS encapsulation level is not supported");
1378         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1379                 return rte_flow_error_set(error, ENOTSUP,
1380                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1381                                           "RSS hash key must be exactly 40 bytes");
1382
1383         if (!hns3_rss_input_tuple_supported(hw, rss))
1384                 return rte_flow_error_set(error, EINVAL,
1385                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1386                                           &rss->types,
1387                                           "input RSS types are not supported");
1388
1389         act_index++;
1390
1391         /* Check if the next not void action is END */
1392         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1393         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1394                 memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1395                 return rte_flow_error_set(error, EINVAL,
1396                                           RTE_FLOW_ERROR_TYPE_ACTION,
1397                                           act, "Not supported action.");
1398         }
1399
1400         return 0;
1401 }
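
/*
 * Illustrative sketch (not part of the driver): the minimal action list the
 * parser above accepts, i.e. a single RSS action followed by END. Queue ids
 * are assumed to be below hw->alloc_rss_size; the 40-byte key length matches
 * RTE_DIM(rss_conf->key).
 *
 *     static const uint16_t queues[] = { 0, 1 };
 *     struct rte_flow_action_rss rss = {
 *             .types = ETH_RSS_IP,
 *             .key_len = HNS3_RSS_KEY_SIZE,
 *             .key = hns3_hash_key,
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */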
1402
1403 static int
1404 hns3_disable_rss(struct hns3_hw *hw)
1405 {
1406         int ret;
1407
1408         /* Redirect the redirection table to queue 0 */
1409         ret = hns3_rss_reset_indir_table(hw);
1410         if (ret)
1411                 return ret;
1412
1413         /* Disable RSS */
1414         hw->rss_info.conf.types = 0;
1415         hw->rss_dis_flag = true;
1416
1417         return 0;
1418 }
1419
1420 static void
1421 hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1422 {
1423         if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1424                 hns3_warn(hw, "Default RSS hash key will be used");
1425                 rss_conf->key = hns3_hash_key;
1426                 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1427         }
1428 }
1429
1430 static int
1431 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1432                          uint8_t *hash_algo)
1433 {
1434         enum rte_eth_hash_function algo_func = *func;
1435         switch (algo_func) {
1436         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1437                 /* Keep *hash_algo as what it used to be */
1438                 algo_func = hw->rss_info.conf.func;
1439                 break;
1440         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1441                 *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1442                 break;
1443         case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1444                 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1445                 break;
1446         case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1447                 *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1448                 break;
1449         default:
1450                 hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
1451                          algo_func);
1452                 return -EINVAL;
1453         }
1454         *func = algo_func;
1455
1456         return 0;
1457 }
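
/*
 * Illustrative summary (not part of the driver): the rte_flow to hns3 hash
 * algorithm mapping performed above.
 *
 *     RTE_ETH_HASH_FUNCTION_DEFAULT            keep the previous algorithm
 *     RTE_ETH_HASH_FUNCTION_TOEPLITZ           HNS3_RSS_HASH_ALGO_TOEPLITZ
 *     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR         HNS3_RSS_HASH_ALGO_SIMPLE
 *     RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP
 *     anything else                            -EINVAL
 */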
1458
1459 static int
1460 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1461 {
1462         struct hns3_rss_tuple_cfg *tuple;
1463         int ret;
1464
1465         hns3_parse_rss_key(hw, rss_config);
1466
1467         ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1468                                        &hw->rss_info.hash_algo);
1469         if (ret)
1470                 return ret;
1471
1472         ret = hns3_rss_set_algo_key(hw, rss_config->key);
1473         if (ret)
1474                 return ret;
1475
1476         /* Update algorithm of hw */
1477         hw->rss_info.conf.func = rss_config->func;
1478
1479         /* Set flow type supported */
1480         tuple = &hw->rss_info.rss_tuple_sets;
1481         ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1482         if (ret)
1483                 hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1484
1485         return ret;
1486 }
1487
1488 static int
1489 hns3_update_indir_table(struct rte_eth_dev *dev,
1490                         const struct rte_flow_action_rss *conf, uint16_t num)
1491 {
1492         struct hns3_adapter *hns = dev->data->dev_private;
1493         struct hns3_hw *hw = &hns->hw;
1494         uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
1495         uint16_t j;
1496         uint32_t i;
1497
1498         /* Fill in redirection table */
1499         memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1500                sizeof(hw->rss_info.rss_indirection_tbl));
1501         for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
1502                 j %= num;
1503                 if (conf->queue[j] >= hw->alloc_rss_size) {
1504                         hns3_err(hw, "queue id(%u) set in redirection table "
1505                                  "exceeds queue number(%u) allocated to a TC.",
1506                                  conf->queue[j], hw->alloc_rss_size);
1507                         return -EINVAL;
1508                 }
1509                 indir_tbl[i] = conf->queue[j];
1510         }
1511
1512         return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
1513 }
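
/*
 * Illustrative sketch (not part of the driver): with num == 3 and
 * conf->queue == { 5, 6, 7 }, the loop above fills the indirection table
 * round-robin:
 *
 *     indir_tbl[] = { 5, 6, 7, 5, 6, 7, ... }   (HNS3_RSS_IND_TBL_SIZE entries)
 */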
1514
1515 static int
1516 hns3_config_rss_filter(struct rte_eth_dev *dev,
1517                        const struct hns3_rss_conf *conf, bool add)
1518 {
1519         struct hns3_process_private *process_list = dev->process_private;
1520         struct hns3_adapter *hns = dev->data->dev_private;
1521         struct hns3_rss_conf_ele *rss_filter_ptr;
1522         struct hns3_hw *hw = &hns->hw;
1523         struct hns3_rss_conf *rss_info;
1524         uint64_t flow_types;
1525         uint16_t num;
1526         int ret;
1527
1528         struct rte_flow_action_rss rss_flow_conf = {
1529                 .func = conf->conf.func,
1530                 .level = conf->conf.level,
1531                 .types = conf->conf.types,
1532                 .key_len = conf->conf.key_len,
1533                 .queue_num = conf->conf.queue_num,
1534                 .key = conf->conf.key_len ?
1535                     (void *)(uintptr_t)conf->conf.key : NULL,
1536                 .queue = conf->conf.queue,
1537         };
1538
1539         /* Filter the unsupported flow types */
1540         flow_types = conf->conf.types ?
1541                      rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1542                      hw->rss_info.conf.types;
1543         if (flow_types != rss_flow_conf.types)
1544                 hns3_warn(hw, "modified RSS types based on hardware support, "
1545                               "requested:%" PRIx64 " configured:%" PRIx64,
1546                           rss_flow_conf.types, flow_types);
1547         /* Update the useful flow types */
1548         rss_flow_conf.types = flow_types;
1549
1550         rss_info = &hw->rss_info;
1551         if (!add) {
1552                 if (!conf->valid)
1553                         return 0;
1554
1555                 ret = hns3_disable_rss(hw);
1556                 if (ret) {
1557                         hns3_err(hw, "RSS disable failed(%d)", ret);
1558                         return ret;
1559                 }
1560
1561                 if (rss_flow_conf.queue_num) {
1562                         /*
1563                          * Since the contents the queue pointer referred to have
1564                          * been reset to 0, rss_info->conf.queue must be set to NULL.
1565                          */
1566                         rss_info->conf.queue = NULL;
1567                         rss_info->conf.queue_num = 0;
1568                 }
1569
1570                 /* set RSS func invalid after flushed */
1571                 rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1572                 return 0;
1573         }
1574
1575         /* Set rx queues to use */
1576         num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1577         if (rss_flow_conf.queue_num > num)
1578                 hns3_warn(hw, "configured queue number(%u) exceeds the range, truncated",
1579                           rss_flow_conf.queue_num);
1580         hns3_info(hw, "a maximum of %u contiguous PF queues are configured", num);
1581
1582         rte_spinlock_lock(&hw->lock);
1583         if (num) {
1584                 ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1585                 if (ret)
1586                         goto rss_config_err;
1587         }
1588
1589         /* Set hash algorithm and flow types by the user's config */
1590         ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1591         if (ret)
1592                 goto rss_config_err;
1593
1594         ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1595         if (ret) {
1596                 hns3_err(hw, "RSS config init failed(%d)", ret);
1597                 goto rss_config_err;
1598         }
1599
1600         /*
1601          * When a new RSS rule is created, the old rule is overridden and
1602          * marked invalid.
1603          */
1604         TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
1605                 rss_filter_ptr->filter_info.valid = false;
1606
1607 rss_config_err:
1608         rte_spinlock_unlock(&hw->lock);
1609
1610         return ret;
1611 }
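
/*
 * Illustrative sketch (not part of the driver): the two ways callers drive
 * this helper.
 *
 *     hns3_config_rss_filter(dev, conf, true);    install or overwrite a rule
 *     hns3_config_rss_filter(dev, conf, false);   tear a rule down; disables
 *                                                 RSS and invalidates the
 *                                                 cached hash func
 */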
1612
1613 static int
1614 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1615 {
1616         struct hns3_process_private *process_list = dev->process_private;
1617         struct hns3_adapter *hns = dev->data->dev_private;
1618         struct hns3_rss_conf_ele *rss_filter_ptr;
1619         struct hns3_hw *hw = &hns->hw;
1620         int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1621         int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1622         int ret = 0;
1623
1624         rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1625         while (rss_filter_ptr) {
1626                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1627                              entries);
1628                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1629                                              false);
1630                 if (ret)
1631                         rss_rule_fail_cnt++;
1632                 else
1633                         rss_rule_succ_cnt++;
1634                 rte_free(rss_filter_ptr);
1635                 rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1636         }
1637
1638         if (rss_rule_fail_cnt) {
1639                 hns3_err(hw, "failed to delete all RSS filters, success num = %d "
1640                              "fail num = %d", rss_rule_succ_cnt,
1641                              rss_rule_fail_cnt);
1642                 ret = -EIO;
1643         }
1644
1645         return ret;
1646 }
1647
1648 int
1649 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1650 {
1651         struct hns3_adapter *hns = dev->data->dev_private;
1652         struct hns3_hw *hw = &hns->hw;
1653
1654         /* When the user flushes all rules, the RSS rule need not be restored */
1655         if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1656                 return 0;
1657
1658         return hns3_config_rss_filter(dev, &hw->rss_info, true);
1659 }
1660
1661 static int
1662 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1663                     const struct hns3_rss_conf *conf, bool add)
1664 {
1665         struct hns3_adapter *hns = dev->data->dev_private;
1666         struct hns3_hw *hw = &hns->hw;
1667         bool ret;
1668
1669         ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1670         if (ret) {
1671                 hns3_err(hw, "duplicate RSS configuration entered: %d", ret);
1672                 return -EINVAL;
1673         }
1674
1675         return hns3_config_rss_filter(dev, conf, add);
1676 }
1677
1678 static int
1679 hns3_flow_args_check(const struct rte_flow_attr *attr,
1680                      const struct rte_flow_item pattern[],
1681                      const struct rte_flow_action actions[],
1682                      struct rte_flow_error *error)
1683 {
1684         if (pattern == NULL)
1685                 return rte_flow_error_set(error, EINVAL,
1686                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1687                                           NULL, "NULL pattern.");
1688
1689         if (actions == NULL)
1690                 return rte_flow_error_set(error, EINVAL,
1691                                           RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1692                                           NULL, "NULL action.");
1693
1694         if (attr == NULL)
1695                 return rte_flow_error_set(error, EINVAL,
1696                                           RTE_FLOW_ERROR_TYPE_ATTR,
1697                                           NULL, "NULL attribute.");
1698
1699         return hns3_check_attr(attr, error);
1700 }
1701
1702 /*
1703  * Check if the flow rule is supported by hns3.
1704  * It only checks the format; it does not guarantee that the rule can be
1705  * programmed into the HW, because there may not be enough room for it.
1706  */
1707 static int
1708 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1709                    const struct rte_flow_item pattern[],
1710                    const struct rte_flow_action actions[],
1711                    struct rte_flow_error *error)
1712 {
1713         struct hns3_fdir_rule fdir_rule;
1714         int ret;
1715
1716         ret = hns3_flow_args_check(attr, pattern, actions, error);
1717         if (ret)
1718                 return ret;
1719
1720         if (hns3_find_rss_general_action(pattern, actions))
1721                 return hns3_parse_rss_filter(dev, actions, error);
1722
1723         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1724         return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1725 }
1726
1727 /*
1728  * Create a flow rule.
1729  * Theoretically one rule can match more than one filter.
1730  * We will let it use the first filter it hits,
1731  * so the sequence matters.
1732  */
1733 static struct rte_flow *
1734 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1735                  const struct rte_flow_item pattern[],
1736                  const struct rte_flow_action actions[],
1737                  struct rte_flow_error *error)
1738 {
1739         struct hns3_process_private *process_list = dev->process_private;
1740         struct hns3_adapter *hns = dev->data->dev_private;
1741         struct hns3_hw *hw = &hns->hw;
1742         const struct hns3_rss_conf *rss_conf;
1743         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1744         struct hns3_rss_conf_ele *rss_filter_ptr;
1745         struct hns3_flow_mem *flow_node;
1746         const struct rte_flow_action *act;
1747         struct rte_flow *flow;
1748         struct hns3_fdir_rule fdir_rule;
1749         int ret;
1750
1751         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1752         if (ret)
1753                 return NULL;
1754
1755         flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1756         if (flow == NULL) {
1757                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1758                                    NULL, "Failed to allocate flow memory");
1759                 return NULL;
1760         }
1761         flow_node = rte_zmalloc("hns3 flow node",
1762                                 sizeof(struct hns3_flow_mem), 0);
1763         if (flow_node == NULL) {
1764                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1765                                    NULL, "Failed to allocate flow list memory");
1766                 rte_free(flow);
1767                 return NULL;
1768         }
1769
1770         flow_node->flow = flow;
1771         TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
1772
1773         act = hns3_find_rss_general_action(pattern, actions);
1774         if (act) {
1775                 rss_conf = act->conf;
1776
1777                 ret = hns3_flow_parse_rss(dev, rss_conf, true);
1778                 if (ret)
1779                         goto err;
1780
1781                 rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1782                                              sizeof(struct hns3_rss_conf_ele),
1783                                              0);
1784                 if (rss_filter_ptr == NULL) {
1785                         hns3_err(hw,
1786                                  "Failed to allocate hns3_rss_filter memory");
1787                         ret = -ENOMEM;
1788                         goto err;
1789                 }
1790                 hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1791                                    &rss_conf->conf);
1792                 rss_filter_ptr->filter_info.valid = true;
1793                 TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
1794                                   rss_filter_ptr, entries);
1795
1796                 flow->rule = rss_filter_ptr;
1797                 flow->filter_type = RTE_ETH_FILTER_HASH;
1798                 return flow;
1799         }
1800
1801         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1802         ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1803         if (ret)
1804                 goto out;
1805
1806         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1807                 ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1808                                        fdir_rule.act_cnt.id, error);
1809                 if (ret)
1810                         goto out;
1811
1812                 flow->counter_id = fdir_rule.act_cnt.id;
1813         }
1814         ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1815         if (!ret) {
1816                 fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1817                                             sizeof(struct hns3_fdir_rule_ele),
1818                                             0);
1819                 if (fdir_rule_ptr == NULL) {
1820                         hns3_err(hw, "Failed to allocate fdir_rule memory");
1821                         ret = -ENOMEM;
1822                         goto err_fdir;
1823                 }
1824
1825                 memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1826                         sizeof(struct hns3_fdir_rule));
1827                 TAILQ_INSERT_TAIL(&process_list->fdir_list,
1828                                   fdir_rule_ptr, entries);
1829                 flow->rule = fdir_rule_ptr;
1830                 flow->filter_type = RTE_ETH_FILTER_FDIR;
1831
1832                 return flow;
1833         }
1834
1835 err_fdir:
1836         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1837                 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1838
1839 err:
1840         rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1841                            "Failed to create flow");
1842 out:
1843         TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1844         rte_free(flow_node);
1845         rte_free(flow);
1846         return NULL;
1847 }
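
/*
 * Illustrative sketch (not part of the driver): how an application reaches
 * this entry point through the generic rte_flow API; port_id, attr, pattern
 * and actions are assumptions of the example.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *flow;
 *
 *     flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (flow == NULL)
 *             printf("flow create failed: %s\n",
 *                    err.message ? err.message : "unknown");
 */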
1848
1849 /* Destroy a flow rule on hns3. */
1850 static int
1851 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1852                   struct rte_flow_error *error)
1853 {
1854         struct hns3_process_private *process_list = dev->process_private;
1855         struct hns3_adapter *hns = dev->data->dev_private;
1856         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1857         struct hns3_rss_conf_ele *rss_filter_ptr;
1858         struct hns3_flow_mem *flow_node;
1859         enum rte_filter_type filter_type;
1860         struct hns3_fdir_rule fdir_rule;
1861         int ret;
1862
1863         if (flow == NULL)
1864                 return rte_flow_error_set(error, EINVAL,
1865                                           RTE_FLOW_ERROR_TYPE_HANDLE,
1866                                           flow, "Flow is NULL");
1867         filter_type = flow->filter_type;
1868         switch (filter_type) {
1869         case RTE_ETH_FILTER_FDIR:
1870                 fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1871                 memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1872                            sizeof(struct hns3_fdir_rule));
1873
1874                 ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1875                 if (ret)
1876                         return rte_flow_error_set(error, EIO,
1877                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1878                                                   flow,
1879                                                   "Failed to destroy FDIR rule, try again");
1880                 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1881                         hns3_counter_release(dev, fdir_rule.act_cnt.id);
1882                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1883                 rte_free(fdir_rule_ptr);
1884                 fdir_rule_ptr = NULL;
1885                 break;
1886         case RTE_ETH_FILTER_HASH:
1887                 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1888                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1889                                              false);
1890                 if (ret)
1891                         return rte_flow_error_set(error, EIO,
1892                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1893                                                   flow,
1894                                                   "Failed to destroy RSS rule, try again");
1895                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1896                              entries);
1897                 rte_free(rss_filter_ptr);
1898                 rss_filter_ptr = NULL;
1899                 break;
1900         default:
1901                 return rte_flow_error_set(error, EINVAL,
1902                                           RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1903                                           "Unsupported filter type");
1904         }
1905
1906         TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
1907                 if (flow_node->flow == flow) {
1908                         TAILQ_REMOVE(&process_list->flow_list, flow_node,
1909                                      entries);
1910                         rte_free(flow_node);
1911                         flow_node = NULL;
1912                         break;
1913                 }
1914         }
1915         rte_free(flow);
1916         flow = NULL;
1917
1918         return 0;
1919 }
1920
1921 /* Destroy all flow rules associated with a port on hns3. */
1922 static int
1923 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1924 {
1925         struct hns3_adapter *hns = dev->data->dev_private;
1926         int ret;
1927
1928         /* FDIR is available only in PF driver */
1929         if (!hns->is_vf) {
1930                 ret = hns3_clear_all_fdir_filter(hns);
1931                 if (ret) {
1932                         rte_flow_error_set(error, ret,
1933                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1934                                            NULL, "Failed to flush rule");
1935                         return ret;
1936                 }
1937                 hns3_counter_flush(dev);
1938         }
1939
1940         ret = hns3_clear_rss_filter(dev);
1941         if (ret) {
1942                 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1943                                    NULL, "Failed to flush rss filter");
1944                 return ret;
1945         }
1946
1947         hns3_filterlist_flush(dev);
1948
1949         return 0;
1950 }
1951
1952 /* Query an existing flow rule. */
1953 static int
1954 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1955                 const struct rte_flow_action *actions, void *data,
1956                 struct rte_flow_error *error)
1957 {
1958         struct rte_flow_action_rss *rss_conf;
1959         struct hns3_rss_conf_ele *rss_rule;
1960         struct rte_flow_query_count *qc;
1961         int ret;
1962
1963         if (!flow->rule)
1964                 return rte_flow_error_set(error, EINVAL,
1965                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
1966
1967         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1968                 switch (actions->type) {
1969                 case RTE_FLOW_ACTION_TYPE_VOID:
1970                         break;
1971                 case RTE_FLOW_ACTION_TYPE_COUNT:
1972                         qc = (struct rte_flow_query_count *)data;
1973                         ret = hns3_counter_query(dev, flow, qc, error);
1974                         if (ret)
1975                                 return ret;
1976                         break;
1977                 case RTE_FLOW_ACTION_TYPE_RSS:
1978                         if (flow->filter_type != RTE_ETH_FILTER_HASH) {
1979                                 return rte_flow_error_set(error, ENOTSUP,
1980                                         RTE_FLOW_ERROR_TYPE_ACTION,
1981                                         actions, "action is not supported");
1982                         }
1983                         rss_conf = (struct rte_flow_action_rss *)data;
1984                         rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
1985                         rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
1986                                    sizeof(struct rte_flow_action_rss));
1987                         break;
1988                 default:
1989                         return rte_flow_error_set(error, ENOTSUP,
1990                                 RTE_FLOW_ERROR_TYPE_ACTION,
1991                                 actions, "action is not supported");
1992                 }
1993         }
1994
1995         return 0;
1996 }
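
/*
 * Illustrative sketch (not part of the driver): querying the hit counter of
 * a flow created with a COUNT action. The queried action list must end with
 * END, as the loop above walks it until that terminator.
 *
 *     struct rte_flow_query_count cnt = { .reset = 0 };
 *     struct rte_flow_action query[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_query(port_id, flow, query, &cnt, &err) == 0)
 *             printf("hits: %" PRIu64 "\n", cnt.hits);
 */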
1997
1998 static const struct rte_flow_ops hns3_flow_ops = {
1999         .validate = hns3_flow_validate,
2000         .create = hns3_flow_create,
2001         .destroy = hns3_flow_destroy,
2002         .flush = hns3_flow_flush,
2003         .query = hns3_flow_query,
2004         .isolate = NULL,
2005 };
2006
2007 /*
2008  * The entry point of the flow API.
2009  * @param dev
2010  *   Pointer to Ethernet device.
2011  * @return
2012  *   0 on success, a negative errno value otherwise.
2013  */
2014 int
2015 hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
2016                      enum rte_filter_op filter_op, void *arg)
2017 {
2018         struct hns3_hw *hw;
2019         int ret = 0;
2020
2021         hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2022         switch (filter_type) {
2023         case RTE_ETH_FILTER_GENERIC:
2024                 if (filter_op != RTE_ETH_FILTER_GET)
2025                         return -EINVAL;
2026                 if (hw->adapter_state >= HNS3_NIC_CLOSED)
2027                         return -ENODEV;
2028                 *(const void **)arg = &hns3_flow_ops;
2029                 break;
2030         default:
2031                 hns3_err(hw, "Filter type (%d) not supported", filter_type);
2032                 ret = -EOPNOTSUPP;
2033                 break;
2034         }
2035
2036         return ret;
2037 }
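
/*
 * Illustrative sketch (not part of the driver): the rte_flow layer resolves
 * its ops through this callback with RTE_ETH_FILTER_GENERIC and
 * RTE_ETH_FILTER_GET, roughly:
 *
 *     const struct rte_flow_ops *ops = NULL;
 *
 *     if (hns3_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *                              RTE_ETH_FILTER_GET, &ops) == 0)
 *             ... ops now points at hns3_flow_ops ...
 */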