remove extra parentheses in return statement
[dpdk.git] / drivers / net / mlx5 / mlx5_rxq.c
index 6d8f7d2..ebbe186 100644 (file)
 #include "mlx5_utils.h"
 #include "mlx5_defs.h"
 
+/* Initialization data for hash RX queues.
+ * Each entry maps a hash RX queue type to the verbs hash fields it uses,
+ * the corresponding DPDK RSS flag(s), a flow rule priority and a flow
+ * specification; entries chain through ->underlayer down to HASH_RXQ_ETH. */
+const struct hash_rxq_init hash_rxq_init[] = {
+       [HASH_RXQ_TCPV4] = {
+               .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+                               IBV_EXP_RX_HASH_DST_IPV4 |
+                               IBV_EXP_RX_HASH_SRC_PORT_TCP |
+                               IBV_EXP_RX_HASH_DST_PORT_TCP),
+               .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
+               .flow_priority = 0,
+               .flow_spec.tcp_udp = {
+                       .type = IBV_EXP_FLOW_SPEC_TCP,
+                       .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+               },
+               .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
+       },
+       [HASH_RXQ_UDPV4] = {
+               .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+                               IBV_EXP_RX_HASH_DST_IPV4 |
+                               IBV_EXP_RX_HASH_SRC_PORT_UDP |
+                               IBV_EXP_RX_HASH_DST_PORT_UDP),
+               .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
+               .flow_priority = 0,
+               .flow_spec.tcp_udp = {
+                       .type = IBV_EXP_FLOW_SPEC_UDP,
+                       .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+               },
+               .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
+       },
+       [HASH_RXQ_IPV4] = {
+               .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+                               IBV_EXP_RX_HASH_DST_IPV4),
+               .dpdk_rss_hf = (ETH_RSS_IPV4 |
+                               ETH_RSS_FRAG_IPV4),
+               .flow_priority = 1,
+               .flow_spec.ipv4 = {
+                       .type = IBV_EXP_FLOW_SPEC_IPV4,
+                       .size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
+               },
+               .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
+       },
+#ifdef HAVE_FLOW_SPEC_IPV6
+       [HASH_RXQ_TCPV6] = {
+               .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+                               IBV_EXP_RX_HASH_DST_IPV6 |
+                               IBV_EXP_RX_HASH_SRC_PORT_TCP |
+                               IBV_EXP_RX_HASH_DST_PORT_TCP),
+               .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+               .flow_priority = 0,
+               .flow_spec.tcp_udp = {
+                       .type = IBV_EXP_FLOW_SPEC_TCP,
+                       .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+               },
+               .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+       },
+       [HASH_RXQ_UDPV6] = {
+               .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+                               IBV_EXP_RX_HASH_DST_IPV6 |
+                               IBV_EXP_RX_HASH_SRC_PORT_UDP |
+                               IBV_EXP_RX_HASH_DST_PORT_UDP),
+               .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+               .flow_priority = 0,
+               .flow_spec.tcp_udp = {
+                       .type = IBV_EXP_FLOW_SPEC_UDP,
+                       .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+               },
+               .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+       },
+       [HASH_RXQ_IPV6] = {
+               .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+                               IBV_EXP_RX_HASH_DST_IPV6),
+               .dpdk_rss_hf = (ETH_RSS_IPV6 |
+                               ETH_RSS_FRAG_IPV6),
+               .flow_priority = 1,
+               .flow_spec.ipv6 = {
+                       .type = IBV_EXP_FLOW_SPEC_IPV6,
+                       .size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
+               },
+               .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
+       },
+#endif /* HAVE_FLOW_SPEC_IPV6 */
+       /* Catch-all Ethernet type: no hashing (hash_fields == 0). */
+       [HASH_RXQ_ETH] = {
+               .hash_fields = 0,
+               .dpdk_rss_hf = 0,
+               .flow_priority = 2,
+               .flow_spec.eth = {
+                       .type = IBV_EXP_FLOW_SPEC_ETH,
+                       .size = sizeof(hash_rxq_init[0].flow_spec.eth),
+               },
+               .underlayer = NULL, /* End of the underlayer chain. */
+       },
+};
+
+/* Number of entries in hash_rxq_init[]. */
+const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
+
+/* Initialization data for hash RX queue indirection tables. */
+static const struct ind_table_init ind_table_init[] = {
+       {
+               .max_size = -1u, /* Superseded by HW limitations. */
+               .hash_types =
+                       1 << HASH_RXQ_TCPV4 |
+                       1 << HASH_RXQ_UDPV4 |
+                       1 << HASH_RXQ_IPV4 |
+#ifdef HAVE_FLOW_SPEC_IPV6
+                       1 << HASH_RXQ_TCPV6 |
+                       1 << HASH_RXQ_UDPV6 |
+                       1 << HASH_RXQ_IPV6 |
+#endif /* HAVE_FLOW_SPEC_IPV6 */
+                       0,
+               /* Must match the number of bits set in .hash_types above. */
+#ifdef HAVE_FLOW_SPEC_IPV6
+               .hash_types_n = 6,
+#else /* HAVE_FLOW_SPEC_IPV6 */
+               .hash_types_n = 3,
+#endif /* HAVE_FLOW_SPEC_IPV6 */
+       },
+       {
+               /* Dedicated single-entry table for HASH_RXQ_ETH. */
+               .max_size = 1,
+               .hash_types = 1 << HASH_RXQ_ETH,
+               .hash_types_n = 1,
+       },
+};
+
+#define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
+
 /* Default RSS hash key also used for ConnectX-3. */
-static uint8_t hash_rxq_default_key[] = {
+uint8_t rss_hash_default_key[] = {
        0x2c, 0xc6, 0x81, 0xd1,
        0x5b, 0xdb, 0xf4, 0xf7,
        0xfc, 0xa2, 0x83, 0x19,
@@ -78,24 +202,136 @@ static uint8_t hash_rxq_default_key[] = {
        0xfc, 0x1f, 0xdc, 0x2a,
 };
 
+/* Length of the default RSS hash key. */
+const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
+
 /**
- * Return nearest power of two above input value.
+ * Populate flow steering rule for a given hash RX queue type using
+ * information from hash_rxq_init[]. Nothing is written to flow_attr when
+ * flow_attr_size is not large enough, but the required size is still returned.
  *
- * @param v
- *   Input value.
+ * @param[in] hash_rxq
+ *   Pointer to hash RX queue.
+ * @param[out] flow_attr
+ *   Pointer to flow attribute structure to fill. Note that the allocated
+ *   area must be larger and large enough to hold all flow specifications.
+ * @param flow_attr_size
+ *   Entire size of flow_attr and trailing room for flow specifications.
  *
  * @return
- *   Nearest power of two above input value.
+ *   Total size of the flow attribute buffer. No errors are defined.
+ */
+size_t
+hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
+                  struct ibv_exp_flow_attr *flow_attr,
+                  size_t flow_attr_size)
+{
+       size_t offset = sizeof(*flow_attr);
+       enum hash_rxq_type type = hash_rxq->type;
+       const struct hash_rxq_init *init = &hash_rxq_init[type];
+
+       assert(hash_rxq->priv != NULL);
+       assert((size_t)type < RTE_DIM(hash_rxq_init));
+       /* First pass: walk the underlayer chain to compute the total
+        * size required for the attribute plus all flow specifications. */
+       do {
+               offset += init->flow_spec.hdr.size;
+               init = init->underlayer;
+       } while (init != NULL);
+       /* Buffer too small: report the required size without writing. */
+       if (offset > flow_attr_size)
+               return offset;
+       flow_attr_size = offset;
+       init = &hash_rxq_init[type];
+       *flow_attr = (struct ibv_exp_flow_attr){
+               .type = IBV_EXP_FLOW_ATTR_NORMAL,
+               .priority = init->flow_priority,
+               .num_of_specs = 0,
+               .port = hash_rxq->priv->port,
+               .flags = 0,
+       };
+       /* Second pass: copy each specification backwards from the end of
+        * the buffer, so the underlayers end up first in memory order. */
+       do {
+               offset -= init->flow_spec.hdr.size;
+               memcpy((void *)((uintptr_t)flow_attr + offset),
+                      &init->flow_spec,
+                      init->flow_spec.hdr.size);
+               ++flow_attr->num_of_specs;
+               init = init->underlayer;
+       } while (init != NULL);
+       return flow_attr_size;
+}
+
+/**
+ * Convert hash type position in indirection table initializer to
+ * hash RX queue type.
+ *
+ * @param table
+ *   Indirection table initializer.
+ * @param pos
+ *   Hash type position.
+ *
+ * @return
+ *   Hash RX queue type.
+ */
+static enum hash_rxq_type
+hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
+{
+       enum hash_rxq_type type = 0;
+
+       assert(pos < table->hash_types_n);
+       /* Return the type of the pos-th bit set in table->hash_types;
+        * the assert above guarantees the loop terminates via break. */
+       do {
+               if ((table->hash_types & (1 << type)) && (pos-- == 0))
+                       break;
+               ++type;
+       } while (1);
+       return type;
+}
+
+/**
+ * Filter out disabled hash RX queue types from ind_table_init[].
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[out] table
+ *   Output table.
+ *
+ * @return
+ *   Number of table entries.
  */
 static unsigned int
-log2above(unsigned int v)
+priv_make_ind_table_init(struct priv *priv,
+                        struct ind_table_init (*table)[IND_TABLE_INIT_N])
 {
-       unsigned int l;
-       unsigned int r;
+       uint64_t rss_hf;
+       unsigned int i;
+       unsigned int j;
+       unsigned int table_n = 0;
+       /* Mandatory to receive frames not handled by normal hash RX queues. */
+       unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
 
-       for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
-               r |= (v & 1);
-       return (l + r);
+       rss_hf = priv->dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+       /* Process other protocols only if more than one queue. */
+       if (priv->rxqs_n > 1)
+               for (i = 0; (i != hash_rxq_init_n); ++i)
+                       if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
+                               hash_types_sup |= (1 << i);
+
+       /* Filter out entries whose protocols are not in the set. */
+       for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
+               unsigned int nb;
+               unsigned int h;
+
+               /* j is increased only if the table has valid protocols. */
+               assert(j <= i);
+               (*table)[j] = ind_table_init[i];
+               (*table)[j].hash_types &= hash_types_sup;
+               for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
+                       if (((*table)[j].hash_types >> h) & 0x1)
+                               ++nb;
+               (*table)[i].hash_types_n = nb;
+               if (nb) {
+                       ++table_n;
+                       ++j;
+               }
+       }
+       return table_n;
 }
 
 /**
@@ -110,21 +346,20 @@ log2above(unsigned int v)
 int
 priv_create_hash_rxqs(struct priv *priv)
 {
-       static const uint64_t rss_hash_table[] = {
-               /* TCPv4. */
-               (IBV_EXP_RX_HASH_SRC_IPV4 | IBV_EXP_RX_HASH_DST_IPV4 |
-                IBV_EXP_RX_HASH_SRC_PORT_TCP | IBV_EXP_RX_HASH_DST_PORT_TCP),
-               /* UDPv4. */
-               (IBV_EXP_RX_HASH_SRC_IPV4 | IBV_EXP_RX_HASH_DST_IPV4 |
-                IBV_EXP_RX_HASH_SRC_PORT_UDP | IBV_EXP_RX_HASH_DST_PORT_UDP),
-               /* Other IPv4. */
-               (IBV_EXP_RX_HASH_SRC_IPV4 | IBV_EXP_RX_HASH_DST_IPV4),
-               /* None, used for everything else. */
-               0,
-       };
+       struct ibv_exp_wq *wqs[priv->reta_idx_n];
+       struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
+       unsigned int ind_tables_n =
+               priv_make_ind_table_init(priv, &ind_table_init);
+       unsigned int hash_rxqs_n = 0;
+       struct hash_rxq (*hash_rxqs)[] = NULL;
+       struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL;
+       unsigned int i;
+       unsigned int j;
+       unsigned int k;
+       int err = 0;
 
-       DEBUG("allocating hash RX queues for %u WQs", priv->rxqs_n);
-       assert(priv->ind_table == NULL);
+       assert(priv->ind_tables == NULL);
+       assert(priv->ind_tables_n == 0);
        assert(priv->hash_rxqs == NULL);
        assert(priv->hash_rxqs_n == 0);
        assert(priv->pd != NULL);
@@ -132,47 +367,54 @@ priv_create_hash_rxqs(struct priv *priv)
        if (priv->rxqs_n == 0)
                return EINVAL;
        assert(priv->rxqs != NULL);
-
-       /* FIXME: large data structures are allocated on the stack. */
-       unsigned int wqs_n = (1 << log2above(priv->rxqs_n));
-       struct ibv_exp_wq *wqs[wqs_n];
-       struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {
-               .pd = priv->pd,
-               .log_ind_tbl_size = log2above(priv->rxqs_n),
-               .ind_tbl = wqs,
-               .comp_mask = 0,
-       };
-       struct ibv_exp_rwq_ind_table *ind_table = NULL;
-       /* If only one RX queue is configured, RSS is not needed and a single
-        * empty hash entry is used (last rss_hash_table[] entry). */
-       unsigned int hash_rxqs_n =
-               ((priv->rxqs_n == 1) ? 1 : RTE_DIM(rss_hash_table));
-       struct hash_rxq (*hash_rxqs)[hash_rxqs_n] = NULL;
-       unsigned int i;
-       unsigned int j;
-       int err = 0;
-
-       if (wqs_n < priv->rxqs_n) {
-               ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n);
-               err = ERANGE;
-               goto error;
+       if (ind_tables_n == 0) {
+               ERROR("all hash RX queue types have been filtered out,"
+                     " indirection table cannot be created");
+               return EINVAL;
+       }
+       if (priv->rxqs_n & (priv->rxqs_n - 1)) {
+               INFO("%u RX queues are configured, consider rounding this"
+                    " number to the next power of two for better balancing",
+                    priv->rxqs_n);
+               DEBUG("indirection table extended to assume %u WQs",
+                     priv->reta_idx_n);
        }
-       if (wqs_n != priv->rxqs_n)
-               WARN("%u RX queues are configured, consider rounding this"
-                    " number to the next power of two (%u) for optimal"
-                    " performance",
-                    priv->rxqs_n, wqs_n);
-       /* When the number of RX queues is not a power of two, the remaining
-        * table entries are padded with reused WQs and hashes are not spread
-        * uniformly. */
-       for (i = 0, j = 0; (i != wqs_n); ++i) {
-               wqs[i] = (*priv->rxqs)[j]->wq;
-               if (++j == priv->rxqs_n)
-                       j = 0;
+       for (i = 0; (i != priv->reta_idx_n); ++i)
+               wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq;
+       /* Get number of hash RX queues to configure. */
+       for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
+               hash_rxqs_n += ind_table_init[i].hash_types_n;
+       DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
+             hash_rxqs_n, priv->rxqs_n, ind_tables_n);
+       /* Create indirection tables. */
+       ind_tables = rte_calloc(__func__, ind_tables_n,
+                               sizeof((*ind_tables)[0]), 0);
+       if (ind_tables == NULL) {
+               err = ENOMEM;
+               ERROR("cannot allocate indirection tables container: %s",
+                     strerror(err));
+               goto error;
        }
-       errno = 0;
-       ind_table = ibv_exp_create_rwq_ind_table(priv->ctx, &ind_init_attr);
-       if (ind_table == NULL) {
+       for (i = 0; (i != ind_tables_n); ++i) {
+               struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {
+                       .pd = priv->pd,
+                       .log_ind_tbl_size = 0, /* Set below. */
+                       .ind_tbl = wqs,
+                       .comp_mask = 0,
+               };
+               unsigned int ind_tbl_size = ind_table_init[i].max_size;
+               struct ibv_exp_rwq_ind_table *ind_table;
+
+               if (priv->reta_idx_n < ind_tbl_size)
+                       ind_tbl_size = priv->reta_idx_n;
+               ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
+               errno = 0;
+               ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
+                                                        &ind_init_attr);
+               if (ind_table != NULL) {
+                       (*ind_tables)[i] = ind_table;
+                       continue;
+               }
                /* Not clear whether errno is set. */
                err = (errno ? errno : EINVAL);
                ERROR("RX indirection table creation failed with error %d: %s",
@@ -180,24 +422,32 @@ priv_create_hash_rxqs(struct priv *priv)
                goto error;
        }
        /* Allocate array that holds hash RX queues and related data. */
-       hash_rxqs = rte_malloc(__func__, sizeof(*hash_rxqs), 0);
+       hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
+                              sizeof((*hash_rxqs)[0]), 0);
        if (hash_rxqs == NULL) {
                err = ENOMEM;
                ERROR("cannot allocate hash RX queues container: %s",
                      strerror(err));
                goto error;
        }
-       for (i = 0, j = (RTE_DIM(rss_hash_table) - hash_rxqs_n);
-            (j != RTE_DIM(rss_hash_table));
-            ++i, ++j) {
+       for (i = 0, j = 0, k = 0;
+            ((i != hash_rxqs_n) && (j != ind_tables_n));
+            ++i) {
                struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
-
+               enum hash_rxq_type type =
+                       hash_rxq_type_from_pos(&ind_table_init[j], k);
+               struct rte_eth_rss_conf *priv_rss_conf =
+                       (*priv->rss_conf)[type];
                struct ibv_exp_rx_hash_conf hash_conf = {
                        .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
-                       .rx_hash_key_len = sizeof(hash_rxq_default_key),
-                       .rx_hash_key = hash_rxq_default_key,
-                       .rx_hash_fields_mask = rss_hash_table[j],
-                       .rwq_ind_tbl = ind_table,
+                       .rx_hash_key_len = (priv_rss_conf ?
+                                           priv_rss_conf->rss_key_len :
+                                           rss_hash_default_key_len),
+                       .rx_hash_key = (priv_rss_conf ?
+                                       priv_rss_conf->rss_key :
+                                       rss_hash_default_key),
+                       .rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
+                       .rwq_ind_tbl = (*ind_tables)[j],
                };
                struct ibv_exp_qp_init_attr qp_init_attr = {
                        .max_inl_recv = 0, /* Currently not supported. */
@@ -209,30 +459,54 @@ priv_create_hash_rxqs(struct priv *priv)
                        .port_num = priv->port,
                };
 
+               DEBUG("using indirection table %u for hash RX queue %u type %d",
+                     j, i, type);
                *hash_rxq = (struct hash_rxq){
                        .priv = priv,
                        .qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
+                       .type = type,
                };
                if (hash_rxq->qp == NULL) {
                        err = (errno ? errno : EINVAL);
                        ERROR("Hash RX QP creation failure: %s",
                              strerror(err));
-                       while (i) {
-                               hash_rxq = &(*hash_rxqs)[--i];
-                               claim_zero(ibv_destroy_qp(hash_rxq->qp));
-                       }
                        goto error;
                }
+               if (++k < ind_table_init[j].hash_types_n)
+                       continue;
+               /* Switch to the next indirection table and reset hash RX
+                * queue type array index. */
+               ++j;
+               k = 0;
        }
-       priv->ind_table = ind_table;
+       priv->ind_tables = ind_tables;
+       priv->ind_tables_n = ind_tables_n;
        priv->hash_rxqs = hash_rxqs;
        priv->hash_rxqs_n = hash_rxqs_n;
        assert(err == 0);
        return 0;
 error:
-       rte_free(hash_rxqs);
-       if (ind_table != NULL)
-               claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
+       if (hash_rxqs != NULL) {
+               for (i = 0; (i != hash_rxqs_n); ++i) {
+                       struct ibv_qp *qp = (*hash_rxqs)[i].qp;
+
+                       if (qp == NULL)
+                               continue;
+                       claim_zero(ibv_destroy_qp(qp));
+               }
+               rte_free(hash_rxqs);
+       }
+       if (ind_tables != NULL) {
+               for (j = 0; (j != ind_tables_n); ++j) {
+                       struct ibv_exp_rwq_ind_table *ind_table =
+                               (*ind_tables)[j];
+
+                       if (ind_table == NULL)
+                               continue;
+                       claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
+               }
+               rte_free(ind_tables);
+       }
        return err;
 }
 
@@ -250,7 +524,7 @@ priv_destroy_hash_rxqs(struct priv *priv)
        DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
        if (priv->hash_rxqs_n == 0) {
                assert(priv->hash_rxqs == NULL);
-               assert(priv->ind_table == NULL);
+               assert(priv->ind_tables == NULL);
                return;
        }
        for (i = 0; (i != priv->hash_rxqs_n); ++i) {
@@ -270,8 +544,45 @@ priv_destroy_hash_rxqs(struct priv *priv)
        priv->hash_rxqs_n = 0;
        rte_free(priv->hash_rxqs);
        priv->hash_rxqs = NULL;
-       claim_zero(ibv_exp_destroy_rwq_ind_table(priv->ind_table));
-       priv->ind_table = NULL;
+       for (i = 0; (i != priv->ind_tables_n); ++i) {
+               struct ibv_exp_rwq_ind_table *ind_table =
+                       (*priv->ind_tables)[i];
+
+               assert(ind_table != NULL);
+               claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
+       }
+       priv->ind_tables_n = 0;
+       rte_free(priv->ind_tables);
+       priv->ind_tables = NULL;
+}
+
+/**
+ * Check whether a given flow type is allowed.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param type
+ *   Flow type to check.
+ *
+ * @return
+ *   Nonzero if the given flow type is allowed.
+ */
+int
+priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
+{
+       /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
+        * has been requested. */
+       if (priv->promisc_req)
+               return type == HASH_RXQ_FLOW_TYPE_PROMISC;
+       switch (type) {
+       case HASH_RXQ_FLOW_TYPE_PROMISC:
+               /* Reached only when promisc_req == 0, so this yields 0. */
+               return !!priv->promisc_req;
+       case HASH_RXQ_FLOW_TYPE_ALLMULTI:
+               return !!priv->allmulti_req;
+       case HASH_RXQ_FLOW_TYPE_MAC:
+               return 1;
+       }
+       /* Unknown flow types are rejected. */
+       return 0;
 }
 
 /**