mlx4: improve Rx performance with better prefetching

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index fa9216f..e422a80 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -203,9 +203,11 @@ struct rxq {
        } elts;
        unsigned int sp:1; /* Use scattered RX elements. */
        unsigned int csum:1; /* Enable checksum offloading. */
+       unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
        uint32_t mb_len; /* Length of a mp-issued mbuf. */
        struct mlx4_rxq_stats stats; /* RX queue counters. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       struct ibv_exp_res_domain *rd; /* Resource Domain. */
 };
 
 /* TX element. */
@@ -247,6 +249,7 @@ struct txq {
        linear_t (*elts_linear)[]; /* Linearized buffers. */
        struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       struct ibv_exp_res_domain *rd; /* Resource Domain. */
 };
 
 struct priv {
@@ -276,6 +279,7 @@ struct priv {
        unsigned int hw_tss:1; /* TSS is supported. */
        unsigned int hw_rss:1; /* RSS is supported. */
        unsigned int hw_csum:1; /* Checksum offload is supported. */
+       unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
        unsigned int rss:1; /* RSS is enabled. */
        unsigned int vf:1; /* This is a VF device. */
 #ifdef INLINE_RECV
@@ -906,6 +910,17 @@ txq_cleanup(struct txq *txq)
                claim_zero(ibv_destroy_qp(txq->qp));
        if (txq->cq != NULL)
                claim_zero(ibv_destroy_cq(txq->cq));
+       if (txq->rd != NULL) {
+               struct ibv_exp_destroy_res_domain_attr attr = {
+                       .comp_mask = 0,
+               };
+
+               assert(txq->priv != NULL);
+               assert(txq->priv->ctx != NULL);
+               claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
+                                                     txq->rd,
+                                                     &attr));
+       }
        for (i = 0; (i != elemof(txq->mp2mr)); ++i) {
                if (txq->mp2mr[i].mp == NULL)
                        break;
@@ -1243,8 +1258,15 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                }
                /* Should we enable HW CKSUM offload */
                if (buf->ol_flags &
-                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
+                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
                        send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
+                       /* HW does not support checksum offloads at arbitrary
+                        * offsets but automatically recognizes the packet
+                        * type. For inner L3/L4 checksums, only VXLAN (UDP)
+                        * tunnels are currently supported. */
+                       if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
+                               send_flags |= IBV_EXP_QP_BURST_TUNNEL;
+               }
                if (likely(segs == 1)) {
                        uintptr_t addr;
                        uint32_t length;
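
The TX path never parses packet contents; it relies entirely on the mbuf's packet_type to decide whether the extra IBV_EXP_QP_BURST_TUNNEL flag is needed, so the application (or the Rx path that classified the packet) must set that field beforehand. A hypothetical caller-side helper, using the standard DPDK mbuf/ptype macros of this period (an illustration, not part of the patch):

    #include <rte_mbuf.h>

    /* Hypothetical helper: prepare a VXLAN-encapsulated mbuf so that
     * mlx4_tx_burst() requests inner checksum offload for it. */
    static void
    mark_vxlan_tx_offload(struct rte_mbuf *m)
    {
            m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
            m->packet_type = RTE_PTYPE_L3_IPV4 |        /* outer IPv4 */
                             RTE_PTYPE_TUNNEL_VXLAN |   /* satisfies RTE_ETH_IS_TUNNEL_PKT() */
                             RTE_PTYPE_INNER_L3_IPV4 |
                             RTE_PTYPE_INNER_L4_UDP;
            /* send_flags then becomes
             * IBV_EXP_QP_BURST_IP_CSUM | IBV_EXP_QP_BURST_TUNNEL. */
    }
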
@@ -1373,7 +1395,9 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
        };
        union {
                struct ibv_exp_query_intf_params params;
-               struct ibv_qp_init_attr init;
+               struct ibv_exp_qp_init_attr init;
+               struct ibv_exp_res_domain_init_attr rd;
+               struct ibv_exp_cq_init_attr cq;
                struct ibv_exp_qp_attr mod;
        } attr;
        enum ibv_exp_query_intf_status status;
@@ -1387,7 +1411,24 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
        }
        desc /= MLX4_PMD_SGE_WR_N;
        /* MRs will be registered in mp2mr[] later. */
-       tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+       attr.rd = (struct ibv_exp_res_domain_init_attr){
+               .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+                             IBV_EXP_RES_DOMAIN_MSG_MODEL),
+               .thread_model = IBV_EXP_THREAD_SINGLE,
+               .msg_model = IBV_EXP_MSG_HIGH_BW,
+       };
+       tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+       if (tmpl.rd == NULL) {
+               ret = ENOMEM;
+               ERROR("%p: RD creation failure: %s",
+                     (void *)dev, strerror(ret));
+               goto error;
+       }
+       attr.cq = (struct ibv_exp_cq_init_attr){
+               .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+               .res_domain = tmpl.rd,
+       };
+       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
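
Each queue now owns a verbs resource domain created before its CQ and QP. IBV_EXP_THREAD_SINGLE tells the provider that a single thread posts to and polls the queue, so per-queue locking can be elided, and IBV_EXP_MSG_HIGH_BW asks it to favor throughput over latency. Condensed from the surrounding hunks, the lifecycle looks roughly like this (error handling omitted, not standalone code):

    struct ibv_exp_res_domain_init_attr rd_attr = {
            .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
                          IBV_EXP_RES_DOMAIN_MSG_MODEL),
            .thread_model = IBV_EXP_THREAD_SINGLE, /* one thread per queue */
            .msg_model = IBV_EXP_MSG_HIGH_BW,      /* optimize for bandwidth */
    };
    struct ibv_exp_res_domain *rd = ibv_exp_create_res_domain(ctx, &rd_attr);
    /* ... referenced by the CQ and QP through their res_domain fields ... */
    struct ibv_exp_destroy_res_domain_attr destroy_attr = { .comp_mask = 0 };
    claim_zero(ibv_exp_destroy_res_domain(ctx, rd, &destroy_attr)); /* in *q_cleanup() */
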
@@ -1398,7 +1439,7 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
              priv->device_attr.max_qp_wr);
        DEBUG("priv->device_attr.max_sge is %d",
              priv->device_attr.max_sge);
-       attr.init = (struct ibv_qp_init_attr){
+       attr.init = (struct ibv_exp_qp_init_attr){
                /* CQ to be associated with the send queue. */
                .send_cq = tmpl.cq,
                /* CQ to be associated with the receive queue. */
@@ -1420,9 +1461,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                .qp_type = IBV_QPT_RAW_PACKET,
                /* Do *NOT* enable this, completions events are managed per
                 * TX burst. */
-               .sq_sig_all = 0
+               .sq_sig_all = 0,
+               .pd = priv->pd,
+               .res_domain = tmpl.rd,
+               .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
        };
-       tmpl.qp = ibv_create_qp(priv->pd, &attr.init);
+       tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
        if (tmpl.qp == NULL) {
                ret = (errno ? errno : EINVAL);
                ERROR("%p: QP creation failure: %s",
@@ -1483,6 +1528,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                .intf_scope = IBV_EXP_INTF_GLOBAL,
                .intf = IBV_EXP_INTF_QP_BURST,
                .obj = tmpl.qp,
+#ifdef HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK
+               /* MC loopback must be disabled when not using a VF. */
+               .family_flags =
+                       (!priv->vf ?
+                        IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK :
+                        0),
+#endif
        };
        tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
        if (tmpl.if_qp == NULL) {
@@ -2411,11 +2463,55 @@ rxq_cleanup(struct rxq *rxq)
        }
        if (rxq->cq != NULL)
                claim_zero(ibv_destroy_cq(rxq->cq));
+       if (rxq->rd != NULL) {
+               struct ibv_exp_destroy_res_domain_attr attr = {
+                       .comp_mask = 0,
+               };
+
+               assert(rxq->priv != NULL);
+               assert(rxq->priv->ctx != NULL);
+               claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
+                                                     rxq->rd,
+                                                     &attr));
+       }
        if (rxq->mr != NULL)
                claim_zero(ibv_dereg_mr(rxq->mr));
        memset(rxq, 0, sizeof(*rxq));
 }
 
+/**
+ * Translate RX completion flags to packet type.
+ *
+ * @param flags
+ *   RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ *   Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(uint32_t flags)
+{
+       uint32_t pkt_type;
+
+       if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+               pkt_type =
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, RTE_PTYPE_L3_IPV6) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_INNER_L3_IPV4) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_INNER_L3_IPV6);
+       else
+               pkt_type =
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6);
+       return pkt_type;
+}
+
 /**
  * Translate RX completion flags to offload flags.
  *
@@ -2430,11 +2526,8 @@ rxq_cleanup(struct rxq *rxq)
 static inline uint32_t
 rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
 {
-       uint32_t ol_flags;
+       uint32_t ol_flags = 0;
 
-       ol_flags =
-               TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV4_PACKET, PKT_RX_IPV4_HDR) |
-               TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV6_PACKET, PKT_RX_IPV6_HDR);
        if (rxq->csum)
                ol_flags |=
                        TRANSPOSE(~flags,
@@ -2443,6 +2536,19 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
                        TRANSPOSE(~flags,
                                  IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
                                  PKT_RX_L4_CKSUM_BAD);
+       /*
+        * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
+        * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
+        * (its value is 0).
+        */
+       if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+               ol_flags |=
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
+                                 PKT_RX_IP_CKSUM_BAD) |
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
+                                 PKT_RX_L4_CKSUM_BAD);
        return ol_flags;
 }
 
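In rxq_cq_to_pkt_type(), the OUTER_* completion bits describe the outer headers of a tunneled packet while the plain IPV4/IPV6 bits describe the inner ones, which is why the mapping differs between the two branches. Both new helpers rely on the TRANSPOSE() macro defined near the top of this file, which moves a single flag bit from its position in the completion word to its position in the mbuf flag word by multiplying or dividing by the ratio of the two masks; feeding it ~flags is what turns the hardware's "checksum OK" bits into PKT_RX_*_CKSUM_BAD bits. A worked illustration with made-up bit positions (the real IBV_EXP_* and PKT_RX_* values differ):

    /* Stand-in bit positions for the illustration only. */
    #define CSUM_OK_BIT   (UINT32_C(1) << 12)   /* completion flag    */
    #define CKSUM_BAD_BIT (UINT32_C(1) << 3)    /* mbuf offload flag  */
    /* Since CSUM_OK_BIT >= CKSUM_BAD_BIT:
     *   TRANSPOSE(val, CSUM_OK_BIT, CKSUM_BAD_BIT)
     *     == ((val & CSUM_OK_BIT) / (CSUM_OK_BIT / CKSUM_BAD_BIT))   */
    uint32_t flags = 0;                                   /* checksum not OK  */
    uint32_t bad;
    bad = TRANSPOSE(~flags, CSUM_OK_BIT, CKSUM_BAD_BIT);  /* -> CKSUM_BAD_BIT */
    flags = CSUM_OK_BIT;                                  /* checksum OK      */
    bad = TRANSPOSE(~flags, CSUM_OK_BIT, CKSUM_BAD_BIT);  /* -> 0             */
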
@@ -2628,6 +2734,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                NB_SEGS(pkt_buf) = j;
                PORT(pkt_buf) = rxq->port_id;
                PKT_LEN(pkt_buf) = pkt_buf_len;
+               pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
                pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
                /* Return packet. */
@@ -2713,6 +2820,12 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                assert(wr->num_sge == 1);
                assert(elts_head < rxq->elts_n);
                assert(rxq->elts_head < rxq->elts_n);
+               /*
+                * Fetch initial bytes of packet descriptor into a
+                * cacheline while allocating rep.
+                */
+               rte_prefetch0(seg);
+               rte_prefetch0(&seg->cacheline1);
                ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
                                                    &flags);
                if (unlikely(ret < 0)) {
@@ -2750,11 +2863,6 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (ret == 0)
                        break;
                len = ret;
-               /*
-                * Fetch initial bytes of packet descriptor into a
-                * cacheline while allocating rep.
-                */
-               rte_prefetch0(seg);
                rep = __rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(rep == NULL)) {
                        /*
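
This pair of hunks is the change the commit title refers to: the descriptor's mbuf is now prefetched before poll_length_flags() rather than after it, and both of its cache lines are requested, so the memory fetch overlaps with the CQ poll instead of stalling the later PKT_LEN()/ol_flags writes. The generic pattern, as a sketch (struct rte_mbuf of this era spans two cache lines, the second starting at the cacheline1 marker):

    rte_prefetch0(seg);               /* first cache line: buffer pointers, lengths */
    rte_prefetch0(&seg->cacheline1);  /* second cache line: pool, next, ...         */
    /* Independent work (here, polling the completion queue) runs while
     * both lines are in flight, so the mbuf is warm when its fields are
     * written further down. */
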
@@ -2788,6 +2896,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                NEXT(seg) = NULL;
                PKT_LEN(seg) = len;
                DATA_LEN(seg) = len;
+               seg->packet_type = rxq_cq_to_pkt_type(flags);
                seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
                /* Return packet. */
@@ -2839,7 +2948,8 @@ repost:
  *   QP pointer or NULL in case of error.
  */
 static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
+rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
+            struct ibv_exp_res_domain *rd)
 {
        struct ibv_exp_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
@@ -2858,8 +2968,10 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
                                         MLX4_PMD_SGE_WR_N),
                },
                .qp_type = IBV_QPT_RAW_PACKET,
-               .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
+               .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
                .pd = priv->pd,
+               .res_domain = rd,
        };
 
 #ifdef INLINE_RECV
@@ -2889,7 +3001,7 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
  */
 static struct ibv_qp *
 rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
-                int parent)
+                int parent, struct ibv_exp_res_domain *rd)
 {
        struct ibv_exp_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
@@ -2909,8 +3021,10 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
                },
                .qp_type = IBV_QPT_RAW_PACKET,
                .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
                              IBV_EXP_QP_INIT_ATTR_QPG),
-               .pd = priv->pd
+               .pd = priv->pd,
+               .res_domain = rd,
        };
 
 #ifdef INLINE_RECV
@@ -2976,6 +3090,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
                tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
                rxq->csum = tmpl.csum;
        }
+       if (priv->hw_csum_l2tun) {
+               tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+               rxq->csum_l2tun = tmpl.csum_l2tun;
+       }
        /* Enable scattered packets support for this queue if necessary. */
        if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
            (dev->data->dev_conf.rxmode.max_rx_pkt_len >
@@ -3162,6 +3280,8 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
        struct ibv_exp_qp_attr mod;
        union {
                struct ibv_exp_query_intf_params params;
+               struct ibv_exp_cq_init_attr cq;
+               struct ibv_exp_res_domain_init_attr rd;
        } attr;
        enum ibv_exp_query_intf_status status;
        struct ibv_recv_wr *bad_wr;
@@ -3200,6 +3320,8 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
        /* Toggle RX checksum offload if hardware supports it. */
        if (priv->hw_csum)
                tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+       if (priv->hw_csum_l2tun)
+               tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
        /* Enable scattered packets support for this queue if necessary. */
        if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
            (dev->data->dev_conf.rxmode.max_rx_pkt_len >
@@ -3222,7 +3344,24 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
                goto error;
        }
 skip_mr:
-       tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+       attr.rd = (struct ibv_exp_res_domain_init_attr){
+               .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+                             IBV_EXP_RES_DOMAIN_MSG_MODEL),
+               .thread_model = IBV_EXP_THREAD_SINGLE,
+               .msg_model = IBV_EXP_MSG_HIGH_BW,
+       };
+       tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+       if (tmpl.rd == NULL) {
+               ret = ENOMEM;
+               ERROR("%p: RD creation failure: %s",
+                     (void *)dev, strerror(ret));
+               goto error;
+       }
+       attr.cq = (struct ibv_exp_cq_init_attr){
+               .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+               .res_domain = tmpl.rd,
+       };
+       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
@@ -3235,10 +3374,11 @@ skip_mr:
              priv->device_attr.max_sge);
 #ifdef RSS_SUPPORT
        if (priv->rss)
-               tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent);
+               tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
+                                          tmpl.rd);
        else
 #endif /* RSS_SUPPORT */
-               tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc);
+               tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
        if (tmpl.qp == NULL) {
                ret = (errno ? errno : EINVAL);
                ERROR("%p: QP creation failure: %s",
@@ -3680,6 +3820,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 {
        struct priv *priv = dev->data->dev_private;
        unsigned int max;
+       char ifname[IF_NAMESIZE];
 
        priv_lock(priv);
        /* FIXME: we should ask the device for these values. */
@@ -3696,7 +3837,8 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                max = 65535;
        info->max_rx_queues = max;
        info->max_tx_queues = max;
-       info->max_mac_addrs = elemof(priv->mac);
+       /* Last array entry is reserved for broadcast. */
+       info->max_mac_addrs = (elemof(priv->mac) - 1);
        info->rx_offload_capa =
                (priv->hw_csum ?
                 (DEV_RX_OFFLOAD_IPV4_CKSUM |
@@ -3709,6 +3851,8 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                  DEV_TX_OFFLOAD_UDP_CKSUM |
                  DEV_TX_OFFLOAD_TCP_CKSUM) :
                 0);
+       if (priv_get_ifname(priv, &ifname) == 0)
+               info->if_index = if_nametoindex(ifname);
        priv_unlock(priv);
 }
 
@@ -3827,11 +3971,8 @@ mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
        priv_lock(priv);
        DEBUG("%p: removing MAC address from index %" PRIu32,
              (void *)dev, index);
-       if (index >= MLX4_MAX_MAC_ADDRESSES)
-               goto end;
-       /* Refuse to remove the broadcast address, this one is special. */
-       if (!memcmp(priv->mac[index].addr_bytes, "\xff\xff\xff\xff\xff\xff",
-                   ETHER_ADDR_LEN))
+       /* Last array entry is reserved for broadcast. */
+       if (index >= (elemof(priv->mac) - 1))
                goto end;
        priv_mac_addr_del(priv, index);
 end:
@@ -3860,11 +4001,8 @@ mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
        priv_lock(priv);
        DEBUG("%p: adding MAC address at index %" PRIu32,
              (void *)dev, index);
-       if (index >= MLX4_MAX_MAC_ADDRESSES)
-               goto end;
-       /* Refuse to add the broadcast address, this one is special. */
-       if (!memcmp(mac_addr->addr_bytes, "\xff\xff\xff\xff\xff\xff",
-                   ETHER_ADDR_LEN))
+       /* Last array entry is reserved for broadcast. */
+       if (index >= (elemof(priv->mac) - 1))
                goto end;
        priv_mac_addr_add(priv, index,
                          (const uint8_t (*)[ETHER_ADDR_LEN])
@@ -4427,13 +4565,8 @@ static const struct eth_dev_ops mlx4_dev_ops = {
        .mac_addr_remove = mlx4_mac_addr_remove,
        .mac_addr_add = mlx4_mac_addr_add,
        .mtu_set = mlx4_dev_set_mtu,
-       .fdir_add_signature_filter = NULL,
-       .fdir_update_signature_filter = NULL,
-       .fdir_remove_signature_filter = NULL,
-       .fdir_add_perfect_filter = NULL,
-       .fdir_update_perfect_filter = NULL,
-       .fdir_remove_perfect_filter = NULL,
-       .fdir_set_masks = NULL
+       .udp_tunnel_add = NULL,
+       .udp_tunnel_del = NULL,
 };
 
 /**
@@ -4757,6 +4890,11 @@ mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                DEBUG("checksum offloading is %ssupported",
                      (priv->hw_csum ? "" : "not "));
 
+               priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
+                                        IBV_EXP_DEVICE_VXLAN_SUPPORT);
+               DEBUG("L2 tunnel checksum offloads are %ssupported",
+                     (priv->hw_csum_l2tun ? "" : "not "));
+
 #ifdef INLINE_RECV
                priv->inl_recv_size = mlx4_getenv_int("MLX4_INLINE_RECV_SIZE");
 
@@ -4801,7 +4939,7 @@ mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                claim_zero(priv_mac_addr_add(priv, 0,
                                             (const uint8_t (*)[ETHER_ADDR_LEN])
                                             mac.addr_bytes));
-               claim_zero(priv_mac_addr_add(priv, 1,
+               claim_zero(priv_mac_addr_add(priv, (elemof(priv->mac) - 1),
                                             &(const uint8_t [ETHER_ADDR_LEN])
                                             { "\xff\xff\xff\xff\xff\xff" }));
 #ifndef NDEBUG