diff --git a/drivers/net/enic/enic_fm_flow.c b/drivers/net/enic/enic_fm_flow.c
index cd364ee..c87d3af 100644
--- a/drivers/net/enic/enic_fm_flow.c
+++ b/drivers/net/enic/enic_fm_flow.c
@@ -8,6 +8,7 @@
 #include <ethdev_driver.h>
 #include <rte_flow_driver.h>
 #include <rte_ether.h>
+#include <rte_geneve.h>
 #include <rte_hash.h>
 #include <rte_jhash.h>
 #include <rte_ip.h>
@@ -22,9 +23,6 @@
 #define IP_DEFTTL  64   /* from RFC 1340. */
 #define IP6_VTC_FLOW 0x60000000
 
-/* Highest Item type supported by Flowman */
-#define FM_MAX_ITEM_TYPE RTE_FLOW_ITEM_TYPE_VXLAN
-
 /* Up to 1024 TCAM entries */
 #define FM_MAX_TCAM_TABLE_SIZE 1024
 
@@ -208,6 +206,7 @@ struct copy_item_args {
        const struct rte_flow_item *item;
        struct fm_tcam_match_entry *fm_tcam_entry;
        uint8_t header_level;
+       struct rte_flow_error *error;
 };
 
 /* functions for copying items into flowman match */
@@ -235,6 +234,10 @@ static enic_copy_item_fn enic_fm_copy_item_tcp;
 static enic_copy_item_fn enic_fm_copy_item_udp;
 static enic_copy_item_fn enic_fm_copy_item_vlan;
 static enic_copy_item_fn enic_fm_copy_item_vxlan;
+static enic_copy_item_fn enic_fm_copy_item_gtp;
+static enic_copy_item_fn enic_fm_copy_item_geneve;
+static enic_copy_item_fn enic_fm_copy_item_geneve_opt;
+static enic_copy_item_fn enic_fm_copy_item_ecpri;
 
 /* Ingress actions */
 static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = {
@@ -346,6 +349,59 @@ static const struct enic_fm_items enic_fm_items[] = {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
+       [RTE_FLOW_ITEM_TYPE_GTP] = {
+               .copy_item = enic_fm_copy_item_gtp,
+               .valid_start_item = 0,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GTPC] = {
+               .copy_item = enic_fm_copy_item_gtp,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GTPU] = {
+               .copy_item = enic_fm_copy_item_gtp,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GENEVE] = {
+               .copy_item = enic_fm_copy_item_geneve,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_ETH,
+                              RTE_FLOW_ITEM_TYPE_IPV4,
+                              RTE_FLOW_ITEM_TYPE_IPV6,
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GENEVE_OPT] = {
+               .copy_item = enic_fm_copy_item_geneve_opt,
+               .valid_start_item = 1,
+               /* Can match at most 1 option */
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_GENEVE,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_ECPRI] = {
+               .copy_item = enic_fm_copy_item_ecpri,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_ETH,
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
 };
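+
+/*
+ * Example (illustrative sketch): an rte_flow pattern accepted through the
+ * new GENEVE entry above, i.e. outer eth/ipv4/udp, the geneve item, then
+ * inner headers. A GENEVE_OPT item may follow the geneve item (at most one
+ * option), and the GENEVE item also requires the vNIC Geneve offload
+ * feature (checked in enic_fm_copy_entry).
+ *
+ *   struct rte_flow_item pattern[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *           { .type = RTE_FLOW_ITEM_TYPE_GENEVE },
+ *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *
+ * GTP-C/GTP-U items must follow a UDP item (e.g. eth/ipv4/udp/gtpu); the
+ * plain GTP item additionally requires the UDP item to give a destination
+ * port.
+ */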
 
 static int
@@ -629,6 +685,263 @@ enic_fm_copy_item_vxlan(struct copy_item_args *arg)
        return 0;
 }
 
+static int
+enic_fm_copy_item_gtp(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_gtp *spec = item->spec;
+       const struct rte_flow_item_gtp *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       int off;
+       uint16_t udp_gtp_uc_port_be = 0;
+
+       ENICPMD_FUNC_TRACE();
+       /* Only 2 header levels (outer and inner) allowed */
+       if (arg->header_level > 0)
+               return -EINVAL;
+
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+
+       switch (item->type) {
+       case RTE_FLOW_ITEM_TYPE_GTP:
+       {
+               /* For vanilla GTP, the UDP destination port must be specified
+                * but the value of the port is not enforced here.
+                */
+               if (!(fm_data->fk_metadata & FKM_UDP) ||
+                   !(fm_data->fk_header_select & FKH_UDP) ||
+                   fm_data->l4.udp.fk_dest == 0)
+                       return -EINVAL;
+               if (!(fm_mask->fk_metadata & FKM_UDP) ||
+                   !(fm_mask->fk_header_select & FKH_UDP) ||
+                   fm_mask->l4.udp.fk_dest != 0xFFFF)
+                       return -EINVAL;
+               break;
+       }
+       case RTE_FLOW_ITEM_TYPE_GTPC:
+       {
+               udp_gtp_uc_port_be = rte_cpu_to_be_16(RTE_GTPC_UDP_PORT);
+               break;
+       }
+       case RTE_FLOW_ITEM_TYPE_GTPU:
+       {
+               udp_gtp_uc_port_be = rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+               break;
+       }
+       default:
+               RTE_ASSERT(0);
+       }
+
+       /* The GTP-C or GTP-U UDP destination port must be matched. */
+       if (udp_gtp_uc_port_be) {
+               if (fm_data->fk_metadata & FKM_UDP &&
+                   fm_data->fk_header_select & FKH_UDP &&
+                   fm_data->l4.udp.fk_dest != udp_gtp_uc_port_be)
+                       return -EINVAL;
+               if (fm_mask->fk_metadata & FKM_UDP &&
+                   fm_mask->fk_header_select & FKH_UDP &&
+                   fm_mask->l4.udp.fk_dest != 0xFFFF)
+                       return -EINVAL;
+
+               /* In any case, add a match for the GTP-C or GTP-U UDP dst port */
+               fm_data->fk_metadata |= FKM_UDP;
+               fm_data->fk_header_select |= FKH_UDP;
+               fm_data->l4.udp.fk_dest = udp_gtp_uc_port_be;
+               fm_mask->fk_metadata |= FKM_UDP;
+               fm_mask->fk_header_select |= FKH_UDP;
+               fm_mask->l4.udp.fk_dest = 0xFFFF;
+       }
+
+       /* The NIC does not support GTP tunnels. No items are allowed after
+        * this one; this prevents the specification of further (inner) items.
+        */
+       arg->header_level = 0;
+
+       /* Match all if no spec */
+       if (!spec)
+               return 0;
+       if (!mask)
+               mask = &rte_flow_item_gtp_mask;
+
+       /*
+        * Use the raw L4 buffer to match GTP, as fm_header_set does not have
+        * a GTP header. The UDP dst port must be specific. Using the raw
+        * buffer does not affect such a UDP item, since we skip the UDP
+        * portion of the raw buffer.
+        */
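+       /*
+        * Resulting raw L4 buffer layout (sketch):
+        *   rawdata[0 .. sizeof(l4.udp))   left zero, so the raw match does
+        *                                  not constrain the UDP header
+        *   rawdata[sizeof(l4.udp) .. )    GTP header fields from spec/mask
+        */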
+       fm_data->fk_header_select |= FKH_L4RAW;
+       fm_mask->fk_header_select |= FKH_L4RAW;
+       off = sizeof(fm_data->l4.udp);
+       memcpy(&fm_data->l4.rawdata[off], spec, sizeof(*spec));
+       memcpy(&fm_mask->l4.rawdata[off], mask, sizeof(*mask));
+       return 0;
+}
+
+static int
+enic_fm_copy_item_geneve(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_geneve *spec = item->spec;
+       const struct rte_flow_item_geneve *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       int off;
+
+       ENICPMD_FUNC_TRACE();
+       /* Only 2 header levels (outer and inner) allowed */
+       if (arg->header_level > 0)
+               return -EINVAL;
+
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+       fm_data->fk_metadata |= FKM_GENEVE;
+       fm_mask->fk_metadata |= FKM_GENEVE;
+       /* items from here on out are inner header items, except options */
+       arg->header_level = 1;
+
+       /* Match all if no spec */
+       if (!spec)
+               return 0;
+       if (!mask)
+               mask = &rte_flow_item_geneve_mask;
+
+       /*
+        * Use the raw L4 buffer to match geneve, as fm_header_set does
+        * not have a geneve header. A UDP item may precede the geneve
+        * item. Using the raw buffer does not affect such a UDP item,
+        * since we skip the UDP portion of the raw buffer.
+        */
+       fm_data->fk_header_select |= FKH_L4RAW;
+       fm_mask->fk_header_select |= FKH_L4RAW;
+       off = sizeof(fm_data->l4.udp);
+       memcpy(&fm_data->l4.rawdata[off], spec, sizeof(struct rte_geneve_hdr));
+       memcpy(&fm_mask->l4.rawdata[off], mask, sizeof(struct rte_geneve_hdr));
+       return 0;
+}
+
+static int
+enic_fm_copy_item_geneve_opt(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_geneve_opt *spec = item->spec;
+       const struct rte_flow_item_geneve_opt *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       struct rte_geneve_hdr *geneve;
+       int off, len;
+
+       ENICPMD_FUNC_TRACE();
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+       /* Match all if no spec */
+       if (!spec)
+               return 0;
+       if (!mask)
+               mask = &rte_flow_item_geneve_opt_mask;
+
+       if (spec->option_len > 0 &&
+           (spec->data == NULL || mask->data == NULL)) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt unexpected null data");
+       }
+       /*
+        * The Geneve item must already be in the raw buffer. Append the
+        * option pattern to it. There are two limitations:
+        * (1) Only the 1st option, the one immediately following the Geneve
+        *     header, can be matched.
+        * (2) The Geneve header must specify a non-zero option length, as the
+        *     HW does not have a "has Geneve option" flag.
+        */
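+       /*
+        * Raw L4 buffer layout after appending the option (sketch), with
+        * udp = sizeof(l4.udp) and gnv = sizeof(struct rte_geneve_hdr):
+        *   rawdata[0 .. udp)            skipped UDP portion
+        *   rawdata[udp .. udp+gnv)      Geneve header (from the GENEVE item)
+        *   rawdata[udp+gnv .. +4)       option class/type/length
+        *   rawdata[.. +option_len*4)    option data
+        */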
+       RTE_ASSERT((fm_data->fk_header_select & FKH_L4RAW) != 0);
+       RTE_ASSERT((fm_mask->fk_header_select & FKH_L4RAW) != 0);
+       off = sizeof(fm_data->l4.udp);
+       geneve = (struct rte_geneve_hdr *)&fm_data->l4.rawdata[off];
+       if (geneve->opt_len == 0) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt requires non-zero geneve option length");
+       }
+       geneve = (struct rte_geneve_hdr *)&fm_mask->l4.rawdata[off];
+       if (geneve->opt_len == 0) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt requires non-zero geneve option length mask");
+       }
+       off = sizeof(fm_data->l4.udp) + sizeof(struct rte_geneve_hdr);
+       if (off + (spec->option_len + 1) * 4 > FM_LAYER_SIZE) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt too large");
+       }
+       /* Copy option header */
+       memcpy(&fm_data->l4.rawdata[off], spec, 4);
+       memcpy(&fm_mask->l4.rawdata[off], mask, 4);
+       /* Copy option data */
+       if (spec->option_len > 0) {
+               off += 4;
+               len = spec->option_len * 4;
+               memcpy(&fm_data->l4.rawdata[off], spec->data, len);
+               memcpy(&fm_mask->l4.rawdata[off], mask->data, len);
+       }
+       return 0;
+}
+
+/* Match eCPRI combined message header */
+static int
+enic_fm_copy_item_ecpri(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_ecpri *spec = item->spec;
+       const struct rte_flow_item_ecpri *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       uint8_t *fm_data_to, *fm_mask_to;
+
+       ENICPMD_FUNC_TRACE();
+
+       /* Tunneling not supported - eCPRI is matched only in the outer header. */
+       if (arg->header_level > 0)
+               return -EINVAL;
+
+       /* Need both spec and mask */
+       if (!spec || !mask)
+               return -EINVAL;
+
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+
+       /* eCPRI can only follow L2/VLAN layer if ethernet type is 0xAEFE. */
+       if (!(fm_data->fk_metadata & FKM_UDP) &&
+           (fm_mask->l2.eth.fk_ethtype != UINT16_MAX ||
+           rte_cpu_to_be_16(fm_data->l2.eth.fk_ethtype) !=
+           RTE_ETHER_TYPE_ECPRI))
+               return -EINVAL;
+
+       if (fm_data->fk_metadata & FKM_UDP) {
+               /* eCPRI on UDP */
+               fm_data->fk_header_select |= FKH_L4RAW;
+               fm_mask->fk_header_select |= FKH_L4RAW;
+               fm_data_to = &fm_data->l4.rawdata[sizeof(fm_data->l4.udp)];
+               fm_mask_to = &fm_mask->l4.rawdata[sizeof(fm_data->l4.udp)];
+       } else {
+               /* eCPRI directly after Ethernet header */
+               fm_data->fk_header_select |= FKH_L3RAW;
+               fm_mask->fk_header_select |= FKH_L3RAW;
+               fm_data_to = &fm_data->l3.rawdata[0];
+               fm_mask_to = &fm_mask->l3.rawdata[0];
+       }
+
+       /*
+        * Use the raw L3 or L4 buffer to match eCPRI, since fm_header_set does
+        * not have an eCPRI header. Only the 1st message header of the PDU can
+        * be matched. The "C" bit is ignored.
+        */
+       memcpy(fm_data_to, spec, sizeof(*spec));
+       memcpy(fm_mask_to, mask, sizeof(*mask));
+       return 0;
+}
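+
+/*
+ * Example eCPRI placements accepted above (illustrative):
+ *   eth / ecpri              - eCPRI over Ethernet; the ETH item must match
+ *                              ether type 0xAEFE exactly
+ *   eth / ipv4 / udp / ecpri - eCPRI over UDP
+ */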
+
 /*
  * Currently, raw pattern match is very limited. It is intended for matching
  * UDP tunnel header (e.g. vxlan or geneve).
@@ -863,22 +1176,38 @@ enic_fm_copy_entry(struct enic_flowman *fm,
 
                item_info = &enic_fm_items[item->type];
 
-               if (item->type > FM_MAX_ITEM_TYPE ||
+               if (item->type >= RTE_DIM(enic_fm_items) ||
                    item_info->copy_item == NULL) {
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "enic: unsupported item");
                }
-
+               /*
+                * Check vNIC feature dependencies. The Geneve item needs the
+                * Geneve offload feature.
+                */
+               if (item->type == RTE_FLOW_ITEM_TYPE_GENEVE &&
+                   !fm->user_enic->geneve) {
+                       return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               NULL, "enic: geneve not supported");
+               }
                /* check to see if item stacking is valid */
                if (!fm_item_stacking_valid(prev_item, item_info,
                                            is_first_item))
                        goto stacking_error;
 
                args.item = item;
+               args.error = error;
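+               /*
+                * Clear the error type before calling copy_item so that the
+                * check below can tell whether copy_item set a specific error.
+                */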
+               if (error)
+                       error->type = RTE_FLOW_ERROR_TYPE_NONE;
                ret = item_info->copy_item(&args);
-               if (ret)
+               if (ret) {
+                       /* If copy_item set the error, return that */
+                       if (error && error->type != RTE_FLOW_ERROR_TYPE_NONE)
+                               return ret;
                        goto item_not_supported;
+               }
                /* Going from outer to inner? Treat it as a new packet start */
                if (prev_header_level != args.header_level) {
                        prev_item = RTE_FLOW_ITEM_TYPE_END;
@@ -1242,6 +1571,35 @@ vf_egress_port_id_action(struct enic_flowman *fm,
        return 0;
 }
 
+static int
+enic_fm_check_transfer_dst(struct enic *enic, uint16_t dst_port_id,
+                          struct rte_eth_dev **dst_dev,
+                          struct rte_flow_error *error)
+{
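+       /*
+        * Shared check for transfer destinations (PORT_ID, PORT_REPRESENTOR,
+        * REPRESENTED_PORT actions): the destination must be a valid enic
+        * port in the same switch domain as this port.
+        */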
+       struct rte_eth_dev *dev;
+
+       ENICPMD_LOG(DEBUG, "port id %u", dst_port_id);
+       if (!rte_eth_dev_is_valid_port(dst_port_id)) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       NULL, "invalid port_id");
+       }
+       dev = &rte_eth_devices[dst_port_id];
+       if (!dev_is_enic(dev)) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       NULL, "port_id is not enic");
+       }
+       if (enic->switch_domain_id != pmd_priv(dev)->switch_domain_id) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       NULL, "destination and source ports are not in the same switch domain");
+       }
+
+       *dst_dev = dev;
+       return 0;
+}
+
 /* Translate flow actions to flowman TCAM entry actions */
 static int
 enic_fm_copy_action(struct enic_flowman *fm,
@@ -1435,7 +1793,7 @@ enic_fm_copy_action(struct enic_flowman *fm,
                }
                case RTE_FLOW_ACTION_TYPE_PORT_ID: {
                        const struct rte_flow_action_port_id *port;
-                       struct rte_eth_dev *dev;
+                       struct rte_eth_dev *dev = NULL;
 
                        if (!ingress && (overlap & PORT_ID)) {
                                ENICPMD_LOG(DEBUG, "cannot have multiple egress PORT_ID actions");
@@ -1446,24 +1804,10 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                vnic_h = enic->fm_vnic_handle; /* This port */
                                break;
                        }
-                       ENICPMD_LOG(DEBUG, "port id %u", port->id);
-                       if (!rte_eth_dev_is_valid_port(port->id)) {
-                               return rte_flow_error_set(error, EINVAL,
-                                       RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "invalid port_id");
-                       }
-                       dev = &rte_eth_devices[port->id];
-                       if (!dev_is_enic(dev)) {
-                               return rte_flow_error_set(error, EINVAL,
-                                       RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "port_id is not enic");
-                       }
-                       if (enic->switch_domain_id !=
-                           pmd_priv(dev)->switch_domain_id) {
-                               return rte_flow_error_set(error, EINVAL,
-                                       RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "destination and source ports are not in the same switch domain");
-                       }
+                       ret = enic_fm_check_transfer_dst(enic, port->id, &dev,
+                                                        error);
+                       if (ret)
+                               return ret;
                        vnic_h = pmd_priv(dev)->fm_vnic_handle;
                        overlap |= PORT_ID;
                        /*
@@ -1560,6 +1904,48 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        ovlan |= rte_be_to_cpu_16(vid->vlan_vid);
                        break;
                }
+               case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
+                       const struct rte_flow_action_ethdev *ethdev;
+                       struct rte_eth_dev *dev = NULL;
+
+                       ethdev = actions->conf;
+                       ret = enic_fm_check_transfer_dst(enic, ethdev->port_id,
+                                                        &dev, error);
+                       if (ret)
+                               return ret;
+                       vnic_h = pmd_priv(dev)->fm_vnic_handle;
+                       overlap |= PORT_ID;
+                       /*
+                        * Action PORT_REPRESENTOR implies ingress destination.
+                        * Nothing to do; we add an implicit steer at the
+                        * end if needed.
+                        */
+                       ingress = 1;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
+                       const struct rte_flow_action_ethdev *ethdev;
+                       struct rte_eth_dev *dev = NULL;
+
+                       if (overlap & PORT_ID) {
+                               ENICPMD_LOG(DEBUG, "cannot have multiple egress PORT_ID actions");
+                               goto unsupported;
+                       }
+                       ethdev = actions->conf;
+                       ret = enic_fm_check_transfer_dst(enic, ethdev->port_id,
+                                                        &dev, error);
+                       if (ret)
+                               return ret;
+                       vnic_h = pmd_priv(dev)->fm_vnic_handle;
+                       overlap |= PORT_ID;
+                       /* Action REPRESENTED_PORT: always egress destination */
+                       ingress = 0;
+                       ret = vf_egress_port_id_action(fm, dev, vnic_h, &fm_op,
+                               error);
+                       if (ret)
+                               return ret;
+                       break;
+               }
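+               /*
+                * Example transfer rules using the two actions above
+                * (testpmd syntax, port ids illustrative):
+                *   flow create 0 transfer pattern eth / end
+                *     actions port_representor port_id 0 / end
+                *   flow create 0 transfer pattern eth / end
+                *     actions represented_port ethdev_port_id 1 / end
+                */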
                default:
                        goto unsupported;
                }
@@ -1674,7 +2060,7 @@ enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
        /* Remove trailing comma */
        if (buf[0])
                *(bp - 1) = '\0';
-       ENICPMD_LOG(DEBUG, "       Acions: %s", buf);
+       ENICPMD_LOG(DEBUG, "       Actions: %s", buf);
 }
 
 static int
@@ -2192,7 +2578,7 @@ enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
        if (ret < 0 && ret != -ENOENT)
                return rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                  NULL, "enic: rte_hash_lookup(aciton)");
+                                  NULL, "enic: rte_hash_lookup(action)");
 
        if (ret == -ENOENT) {
                /* Allocate a new action on the NIC. */
@@ -2200,11 +2586,11 @@ enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
                memcpy(fma, action_in, sizeof(*fma));
 
                ah = calloc(1, sizeof(*ah));
-               memcpy(&ah->key, action_in, sizeof(struct fm_action));
                if (ah == NULL)
                        return rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_HANDLE,
                                           NULL, "enic: calloc(fm-action)");
+               memcpy(&ah->key, action_in, sizeof(struct fm_action));
                args[0] = FM_ACTION_ALLOC;
                args[1] = fm->cmd.pa;
                ret = flowman_cmd(fm, args, 2);
@@ -2263,7 +2649,7 @@ __enic_fm_flow_add_entry(struct enic_flowman *fm,
 
        ENICPMD_FUNC_TRACE();
 
-       /* Get or create an aciton handle. */
+       /* Get or create an action handle. */
        ret = enic_action_handle_get(fm, action_in, error, &ah);
        if (ret)
                return ret;