mbuf: make segment prefree function public
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2ce8234..0808a4c 100644
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   Copyright 2014 6WIND S.A.
  *   All rights reserved.
  *
@@ -70,6 +70,7 @@
 #include <rte_string_fns.h>
 #include <rte_errno.h>
 #include <rte_ip.h>
+#include <rte_net.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
 #include "base/ixgbe_common.h"
 #include "ixgbe_rxtx.h"
 
+#ifdef RTE_LIBRTE_IEEE1588
+#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IXGBE_TX_IEEE1588_TMST 0
+#endif
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (                         \
                PKT_TX_VLAN_PKT |                \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
                PKT_TX_TCP_SEG |                 \
-               PKT_TX_OUTER_IP_CKSUM)
+               PKT_TX_MACSEC |                  \
+               PKT_TX_OUTER_IP_CKSUM |          \
+               IXGBE_TX_IEEE1588_TMST)
+
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+               (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
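
The NOTSUP mask is simply the complement of the supported set inside PKT_TX_OFFLOAD_MASK: XOR-ing the two leaves exactly the offload bits this PMD cannot handle, so a single AND against a packet's ol_flags flags any unsupported request. A minimal sketch of that bit arithmetic, with made-up flag values standing in for the real masks from rte_mbuf.h:

#include <assert.h>
#include <stdint.h>

/* Hypothetical values, for illustration only. */
#define ALL_OFFLOADS  UINT64_C(0xff)  /* stands in for PKT_TX_OFFLOAD_MASK   */
#define SUPPORTED     UINT64_C(0x2f)  /* stands in for IXGBE_TX_OFFLOAD_MASK */
#define NOT_SUPPORTED (ALL_OFFLOADS ^ SUPPORTED)

int main(void)
{
        uint64_t ok_flags  = UINT64_C(0x03); /* within the supported set */
        uint64_t bad_flags = UINT64_C(0x43); /* bit 0x40 is unsupported  */

        assert((ok_flags  & NOT_SUPPORTED) == 0); /* accepted */
        assert((bad_flags & NOT_SUPPORTED) != 0); /* rejected in Tx prep  */
        return 0;
}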
 
 #if 1
 #define RTE_PMD_USE_PREFETCH
 #define rte_ixgbe_prefetch(p)   do {} while (0)
 #endif
 
+#ifdef RTE_IXGBE_INC_VECTOR
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                   uint16_t nb_pkts);
+#endif
+
 /*********************************************************************
  *
  *  TX functions
@@ -131,7 +147,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 
        for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
                /* free buffers one at a time */
-               m = __rte_pktmbuf_prefree_seg(txep->mbuf);
+               m = rte_pktmbuf_prefree_seg(txep->mbuf);
                txep->mbuf = NULL;
 
                if (unlikely(m == NULL))
@@ -321,7 +337,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        /* update tail pointer */
        rte_wmb();
-       IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+       IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
 
        return nb_pkts;
 }
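
Replacing IXGBE_PCI_REG_WRITE with the relaxed variant here (and in the Rx/Tx paths below) leans on the explicit rte_wmb() just above: the descriptor stores must be globally visible before the doorbell, but the doorbell store itself no longer carries an extra I/O barrier of its own. A rough sketch of the same pattern with the generic rte_io API, assuming the ixgbe macros wrap rte_write32()/rte_write32_relaxed() in base/ixgbe_osdep.h:

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_io.h>

/* Sketch: publish a ring tail to the NIC after the descriptors are written. */
static inline void
ring_doorbell(volatile void *tail_reg, uint16_t tail)
{
        /* Make all prior descriptor stores visible before the doorbell. */
        rte_wmb();
        /* Relaxed MMIO write: no additional barrier inside the store. */
        rte_write32_relaxed(rte_cpu_to_le_32(tail), tail_reg);
}
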
@@ -352,6 +368,30 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_tx;
 }
 
+#ifdef RTE_IXGBE_INC_VECTOR
+static uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                   uint16_t nb_pkts)
+{
+       uint16_t nb_tx = 0;
+       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+       while (nb_pkts) {
+               uint16_t ret, num;
+
+               num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+               ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+                                                num);
+               nb_tx += ret;
+               nb_pkts -= ret;
+               if (ret < num)
+                       break;
+       }
+
+       return nb_tx;
+}
+#endif
+
 static inline void
 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
@@ -519,6 +559,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
                cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
        if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
                cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+       if (ol_flags & PKT_TX_MACSEC)
+               cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
        return cmdtype;
 }
 
@@ -897,12 +939,63 @@ end_of_tx:
        PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
                   (unsigned) txq->port_id, (unsigned) txq->queue_id,
                   (unsigned) tx_id, (unsigned) nb_tx);
-       IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+       IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
        txq->tx_tail = tx_id;
 
        return nb_tx;
 }
 
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       int i, ret;
+       uint64_t ol_flags;
+       struct rte_mbuf *m;
+       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+       for (i = 0; i < nb_pkts; i++) {
+               m = tx_pkts[i];
+               ol_flags = m->ol_flags;
+
+               /**
+                * Check if packet meets requirements for number of segments
+                *
+                * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
+                *       non-TSO
+                */
+
+               if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+                       rte_errno = -EINVAL;
+                       return i;
+               }
+
+               if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+                       rte_errno = -ENOTSUP;
+                       return i;
+               }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               ret = rte_validate_tx_offload(m);
+               if (ret != 0) {
+                       rte_errno = ret;
+                       return i;
+               }
+#endif
+               ret = rte_net_intel_cksum_prepare(m);
+               if (ret != 0) {
+                       rte_errno = ret;
+                       return i;
+               }
+       }
+
+       return i;
+}
+
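ixgbe_prep_pkts() is hooked up as dev->tx_pkt_prepare further down in ixgbe_set_tx_function(), so applications reach it through rte_eth_tx_prepare(). A minimal usage sketch; send_burst(), the port/queue values and the parameter types are illustrative and follow the ethdev API of this period:

#include <stdio.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

/* Sketch: validate offloads before handing a burst to the PMD. */
static uint16_t
send_burst(uint8_t port, uint16_t queue, struct rte_mbuf **pkts, uint16_t n)
{
        /* Stops at the first packet that cannot be sent as-is and sets
         * rte_errno (-ENOTSUP for an offload outside IXGBE_TX_OFFLOAD_MASK,
         * -EINVAL for too many segments or a malformed offload layout).
         */
        uint16_t nb_ok = rte_eth_tx_prepare(port, queue, pkts, n);

        if (nb_ok != n)
                printf("tx_prepare stopped at %u, rte_errno=%d\n",
                       nb_ok, rte_errno);

        /* Transmit only the packets that passed preparation. */
        return rte_eth_tx_burst(port, queue, pkts, nb_ok);
}

On the simple and vector Tx paths tx_pkt_prepare stays NULL (see ixgbe_set_tx_function() below), in which case rte_eth_tx_prepare() is expected to fall through and report the whole burst as ready.
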
 /*********************************************************************
  *
  *  RX functions
@@ -1402,17 +1495,19 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
        for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
             i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
-               /* Read desc statuses backwards to avoid race condition */
+               /* Read desc statuses; the barrier orders them before pkt_info */
-               for (j = LOOK_AHEAD-1; j >= 0; --j)
+               for (j = 0; j < LOOK_AHEAD; j++)
                        s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
 
-               for (j = LOOK_AHEAD - 1; j >= 0; --j)
-                       pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
-                                                      lo_dword.data);
+               rte_smp_rmb();
 
                /* Compute how many status bits were set */
-               nb_dd = 0;
-               for (j = 0; j < LOOK_AHEAD; ++j)
-                       nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
+               for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+                               (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
+                       ;
+
+               for (j = 0; j < nb_dd; j++)
+                       pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
+                                                      lo_dword.data);
 
                nb_rx += nb_dd;
 
@@ -1581,7 +1676,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
                /* update tail pointer */
                rte_wmb();
-               IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
+               IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+                                           cur_free_trigger);
        }
 
        if (rxq->rx_tail >= rxq->nb_rx_desc)
@@ -1985,8 +2081,8 @@ next_desc:
 
                        if (!ixgbe_rx_alloc_bufs(rxq, false)) {
                                rte_wmb();
-                               IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
-                                                   next_rdt);
+                               IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+                                                           next_rdt);
                                nb_hold -= rxq->rx_free_thresh;
                        } else {
                                PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
@@ -2157,7 +2253,7 @@ next_desc:
                           rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
 
                rte_wmb();
-               IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
+               IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
                nb_hold = 0;
        }
 
@@ -2282,6 +2378,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
        if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
                        && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
                PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+               dev->tx_pkt_prepare = NULL;
 #ifdef RTE_IXGBE_INC_VECTOR
                if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
                                (rte_eal_process_type() != RTE_PROC_PRIMARY ||
@@ -2302,6 +2399,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
                                (unsigned long)txq->tx_rs_thresh,
                                (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
                dev->tx_pkt_burst = ixgbe_xmit_pkts;
+               dev->tx_pkt_prepare = ixgbe_prep_pkts;
        }
 }
 
@@ -2585,7 +2683,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
         *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
         *   rxq->rx_free_thresh < rxq->nb_rx_desc
         *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
-        *   rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
         * Scattered packets are not supported.  This should be checked
         * outside of this function.
         */
@@ -2607,15 +2704,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
                             "rxq->rx_free_thresh=%d",
                             rxq->nb_rx_desc, rxq->rx_free_thresh);
                ret = -EINVAL;
-       } else if (!(rxq->nb_rx_desc <
-              (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
-               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
-                            "rxq->nb_rx_desc=%d, "
-                            "IXGBE_MAX_RING_DESC=%d, "
-                            "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
-                            rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
-                            RTE_PMD_IXGBE_RX_MAX_BURST);
-               ret = -EINVAL;
        }
 
        return ret;
@@ -2632,12 +2720,7 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
        /*
         * By default, the Rx queue setup function allocates enough memory for
         * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
-        * extra memory at the end of the descriptor ring to be zero'd out. A
-        * pre-condition for using the Rx burst bulk alloc function is that the
-        * number of descriptors is less than or equal to
-        * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
-        * constraints here to see if we need to zero out memory after the end
-        * of the H/W descriptor ring.
+        * extra memory at the end of the descriptor ring to be zero'd out.
         */
        if (adapter->rx_bulk_alloc_allowed)
                /* zero out extra memory */
@@ -2857,11 +2940,6 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct ixgbe_rx_queue *rxq;
        uint32_t desc = 0;
 
-       if (rx_queue_id >= dev->data->nb_rx_queues) {
-               PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
-               return 0;
-       }
-
        rxq = dev->data->rx_queues[rx_queue_id];
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 
@@ -2896,6 +2974,63 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
                        rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
 }
 
+int
+ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct ixgbe_rx_queue *rxq = rx_queue;
+       volatile uint32_t *status;
+       uint32_t nb_hold, desc;
+
+       if (unlikely(offset >= rxq->nb_rx_desc))
+               return -EINVAL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+       if (rxq->rx_using_sse)
+               nb_hold = rxq->rxrearm_nb;
+       else
+#endif
+               nb_hold = rxq->nb_rx_hold;
+       if (offset >= rxq->nb_rx_desc - nb_hold)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+       desc = rxq->rx_tail + offset;
+       if (desc >= rxq->nb_rx_desc)
+               desc -= rxq->nb_rx_desc;
+
+       status = &rxq->rx_ring[desc].wb.upper.status_error;
+       if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
+               return RTE_ETH_RX_DESC_DONE;
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct ixgbe_tx_queue *txq = tx_queue;
+       volatile uint32_t *status;
+       uint32_t desc;
+
+       if (unlikely(offset >= txq->nb_tx_desc))
+               return -EINVAL;
+
+       desc = txq->tx_tail + offset;
+       /* go to next desc that has the RS bit */
+       desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+               txq->tx_rs_thresh;
+       if (desc >= txq->nb_tx_desc) {
+               desc -= txq->nb_tx_desc;
+               if (desc >= txq->nb_tx_desc)
+                       desc -= txq->nb_tx_desc;
+       }
+
+       status = &txq->tx_ring[desc].wb.status;
+       if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
+
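These two callbacks back the generic rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status() helpers, which an application can poll to see how far the hardware has progressed on a queue. A rough usage sketch; probe_queue_depth() and the port/queue values are placeholders, and the parameter types follow the ethdev API of this period:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: check the descriptor 'offset' entries past the software tail. */
static void
probe_queue_depth(uint8_t port, uint16_t queue, uint16_t offset)
{
        int rx = rte_eth_rx_descriptor_status(port, queue, offset);
        int tx = rte_eth_tx_descriptor_status(port, queue, offset);

        if (rx == RTE_ETH_RX_DESC_DONE)
                printf("rx: descriptor %u already written back by the NIC\n",
                       offset);
        if (tx == RTE_ETH_TX_DESC_FULL)
                printf("tx: descriptor %u still held by a pending packet\n",
                       offset);
}
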
 void __attribute__((cold))
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
@@ -3313,15 +3448,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 /**
  * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
  * @dcb_config: pointer to ixgbe_dcb_config structure
  */
 static void
-ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
+ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
                       struct ixgbe_dcb_config *dcb_config)
 {
        uint32_t reg;
-       uint32_t q;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
        if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -3340,11 +3475,6 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
                        reg |= IXGBE_MTQC_VT_ENA;
                IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
 
-               /* Disable drop for all queues */
-               for (q = 0; q < 128; q++)
-                       IXGBE_WRITE_REG(hw, IXGBE_QDE,
-                               (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
-
                /* Enable the Tx desc arbiter */
                reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                reg &= ~IXGBE_RTTDCS_ARBDIS;
@@ -3378,7 +3508,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
                        vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
        /*Configure general DCB TX parameters*/
-       ixgbe_dcb_tx_hw_config(hw, dcb_config);
+       ixgbe_dcb_tx_hw_config(dev, dcb_config);
 }
 
 static void
@@ -3478,16 +3608,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 
 /**
  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
  * @dcb_config: pointer to ixgbe_dcb_config structure
  */
 static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
-              struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+                      struct ixgbe_dcb_config *dcb_config)
 {
        uint32_t reg;
        uint32_t vlanctrl;
        uint8_t i;
+       uint32_t q;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
        /*
@@ -3525,6 +3657,21 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
                }
 
                IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+               if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+                       /* Disable drop for all queues in VMDQ mode*/
+                       for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+                               IXGBE_WRITE_REG(hw, IXGBE_QDE,
+                                               (IXGBE_QDE_WRITE |
+                                                (q << IXGBE_QDE_IDX_SHIFT)));
+               } else {
+                       /* Enable drop for all queues in SRIOV mode */
+                       for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+                               IXGBE_WRITE_REG(hw, IXGBE_QDE,
+                                               (IXGBE_QDE_WRITE |
+                                                (q << IXGBE_QDE_IDX_SHIFT) |
+                                                IXGBE_QDE_ENABLE));
+               }
        }
 
        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
@@ -3615,6 +3762,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
        struct ixgbe_hw *hw =
                        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_bw_conf *bw_conf =
+               IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
        switch (dev->data->dev_conf.rxmode.mq_mode) {
        case ETH_MQ_RX_VMDQ_DCB:
@@ -3637,7 +3786,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
-               /* Get dcb TX configuration parameters from rte_eth_conf */
+               /* Get dcb RX configuration parameters from rte_eth_conf */
                ixgbe_dcb_rx_config(dev, dcb_config);
                /*Configure general DCB RX parameters*/
-               ixgbe_dcb_rx_hw_config(hw, dcb_config);
+               ixgbe_dcb_rx_hw_config(dev, dcb_config);
                break;
        default:
                PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
@@ -3661,7 +3810,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                /*get DCB TX configuration parameters from rte_eth_conf*/
                ixgbe_dcb_tx_config(dev, dcb_config);
                /*Configure general DCB TX parameters*/
-               ixgbe_dcb_tx_hw_config(hw, dcb_config);
+               ixgbe_dcb_tx_hw_config(dev, dcb_config);
                break;
        default:
                PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
@@ -3686,8 +3835,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                /* Re-configure 4 TCs BW */
                for (i = 0; i < nb_tcs; i++) {
                        tc = &dcb_config->tc_config[i];
-                       tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
-                                               (uint8_t)(100 / nb_tcs);
+                       if (bw_conf->tc_num != nb_tcs)
+                               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+                                       (uint8_t)(100 / nb_tcs);
                        tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                                (uint8_t)(100 / nb_tcs);
                }
@@ -3696,6 +3846,16 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                        tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
                        tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
                }
+       } else {
+               /* Re-configure 8 TCs BW */
+               for (i = 0; i < nb_tcs; i++) {
+                       tc = &dcb_config->tc_config[i];
+                       if (bw_conf->tc_num != nb_tcs)
+                               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+                                       (uint8_t)(100 / nb_tcs + (i & 1));
+                       tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+                               (uint8_t)(100 / nb_tcs + (i & 1));
+               }
        }
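
In this 8-TC branch the integer division truncates: 100 / nb_tcs is 12, which would leave 4 percent unassigned, so the (i & 1) term tops up each odd-numbered TC by one percent and the shares sum back to 100 (4 * 12 + 4 * 13). A quick check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const unsigned int nb_tcs = 8;
        unsigned int i, total = 0;

        for (i = 0; i < nb_tcs; i++)
                total += (uint8_t)(100 / nb_tcs + (i & 1));

        assert(total == 100); /* 4 * 12 + 4 * 13 */
        return 0;
}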
 
        switch (hw->mac.type) {
@@ -3810,7 +3970,7 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
            (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
                return;
 
-       if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+       if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
                return;
 
        /** Configure DCB hardware **/
@@ -4073,21 +4233,24 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
                        break;
                }
        } else {
-               /*
-                * SRIOV active scheme
-                * Support RSS together with VMDq & SRIOV
+               /* SRIOV active scheme
+                * Support RSS together with SRIOV.
                 */
                switch (dev->data->dev_conf.rxmode.mq_mode) {
                case ETH_MQ_RX_RSS:
                case ETH_MQ_RX_VMDQ_RSS:
                        ixgbe_config_vf_rss(dev);
                        break;
-
-               /* FIXME if support DCB/RSS together with VMDq & SRIOV */
                case ETH_MQ_RX_VMDQ_DCB:
+               case ETH_MQ_RX_DCB:
+               /* In SRIOV, the configuration is the same as VMDq case */
+                       ixgbe_vmdq_dcb_configure(dev);
+                       break;
+               /* DCB/RSS together with SRIOV is not supported */
                case ETH_MQ_RX_VMDQ_DCB_RSS:
+               case ETH_MQ_RX_DCB_RSS:
                        PMD_INIT_LOG(ERR,
-                               "Could not support DCB with VMDq & SRIOV");
+                               "Could not support DCB/RSS with VMDq & SRIOV");
                        return -1;
                default:
                        ixgbe_config_vf_default(dev);