uint32_t flow_flags;
#define BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN BIT(0)
-
pthread_mutex_t flow_lock;
+
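+ /* Set from HWRM_VNIC_QCAPS; BNXT_VNIC_CAP_COS_CLASSIFY mirrors
+ * HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP.
+ */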
+ uint32_t vnic_cap_flags;
+#define BNXT_VNIC_CAP_COS_CLASSIFY BIT(0)
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
unsigned int rx_num_qs_per_vnic;
uint16_t hwrm_max_ext_req_len;
struct bnxt_link_info link_info;
- struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
- uint8_t tx_cosq_id;
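+ /* Rx and Tx CoS queue info are now tracked separately; tx_cosq_id
+ * becomes an array so each Tx ring can map to its own CoS queue.
+ */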
+ struct bnxt_cos_queue_info rx_cos_queue[BNXT_COS_QUEUE_COUNT];
+ struct bnxt_cos_queue_info tx_cos_queue[BNXT_COS_QUEUE_COUNT];
+ uint8_t tx_cosq_id[BNXT_COS_QUEUE_COUNT];
+ uint8_t rx_cosq_cnt;
uint8_t max_tc;
uint8_t max_lltc;
uint8_t max_q;
goto err_out;
}
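+ /* Bind one VNIC per FW-reported Rx CoS queue when the CoS
+ * classification capability is advertised.
+ */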
+ if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
+ goto skip_cosq_cfg;
+
+ for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+ if (bp->rx_cos_queue[i].id != 0xff) {
+ struct bnxt_vnic_info *vnic;
+
+ /* &bp->vnic_info[j] can never be NULL; bound the index
+ * against the allocated VNIC array (bp->max_vnics) instead.
+ */
+ if (j >= bp->max_vnics) {
+ PMD_DRV_LOG(ERR,
+ "Num pools more than FW profile\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+ vnic = &bp->vnic_info[j++];
+ vnic->cos_queue_id = bp->rx_cos_queue[i].id;
+ bp->rx_cosq_cnt++;
+ }
+ }
+
+skip_cosq_cfg:
rc = bnxt_mq_rx_configure(bp);
if (rc) {
PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
if (rc)
return -EIO;
- rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
+ rc = bnxt_hwrm_vnic_qcaps(bp);
if (rc)
return rc;
if (rc)
return rc;
- /* Get the MAX capabilities for this function */
+ /* Get the MAX capabilities for this function.
+ * This function also allocates context memory for TQM rings and
+ * informs the firmware about this allocated backing store memory.
+ */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
- rc = bnxt_hwrm_vnic_qcaps(bp);
+ rc = bnxt_hwrm_func_qcfg(bp, &mtu);
if (rc)
return rc;
- rc = bnxt_hwrm_func_qcfg(bp, &mtu);
+ rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
if (rc)
return rc;
return rc;
}
+/* VNIC capabilities apply to all VNICs on the function, so no vnic_id is needed */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
int rc = 0;
HWRM_CHECK_RESULT();
+ if (rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
+ PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
+ }
+
bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
HWRM_UNLOCK();
int rc = 0;
struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
int i;
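+ /* The port queue config is queried twice: Tx path first, then Rx. */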
+get_rx_info:
HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
- req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
+ req.flags = rte_cpu_to_le_32(dir);
/* HWRM Version >= 1.9.1 */
if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
req.drv_qmap_cap =
HWRM_CHECK_RESULT();
-#define GET_QUEUE_INFO(x) \
- bp->cos_queue[x].id = resp->queue_id##x; \
- bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
-
- GET_QUEUE_INFO(0);
- GET_QUEUE_INFO(1);
- GET_QUEUE_INFO(2);
- GET_QUEUE_INFO(3);
- GET_QUEUE_INFO(4);
- GET_QUEUE_INFO(5);
- GET_QUEUE_INFO(6);
- GET_QUEUE_INFO(7);
+ if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+ GET_TX_QUEUE_INFO(0);
+ GET_TX_QUEUE_INFO(1);
+ GET_TX_QUEUE_INFO(2);
+ GET_TX_QUEUE_INFO(3);
+ GET_TX_QUEUE_INFO(4);
+ GET_TX_QUEUE_INFO(5);
+ GET_TX_QUEUE_INFO(6);
+ GET_TX_QUEUE_INFO(7);
+ } else {
+ GET_RX_QUEUE_INFO(0);
+ GET_RX_QUEUE_INFO(1);
+ GET_RX_QUEUE_INFO(2);
+ GET_RX_QUEUE_INFO(3);
+ GET_RX_QUEUE_INFO(4);
+ GET_RX_QUEUE_INFO(5);
+ GET_RX_QUEUE_INFO(6);
+ GET_RX_QUEUE_INFO(7);
+ }
HWRM_UNLOCK();
+ if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
+ goto done;
+
if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
- bp->tx_cosq_id = bp->cos_queue[0].id;
+ bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
} else {
+ int j;
+
/* iterate and find the COSq profile to use for Tx */
- for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
- if (bp->cos_queue[i].profile ==
- HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
- bp->tx_cosq_id = bp->cos_queue[i].id;
- break;
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
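+ /* Compact the valid ids (0xff marks an unused slot), e.g.
+ * FW-reported ids {3, 0xff, 5, ...} yield tx_cosq_id[] = {3, 5}.
+ */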
+ for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+ if (bp->tx_cos_queue[i].id != 0xff)
+ bp->tx_cosq_id[j++] =
+ bp->tx_cos_queue[i].id;
+ }
+ } else {
+ for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+ if (bp->tx_cos_queue[i].profile ==
+ HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+ bp->tx_cosq_id[0] =
+ bp->tx_cos_queue[i].id;
+ break;
+ }
}
}
}
bp->max_tc = BNXT_MAX_QUEUE;
bp->max_q = bp->max_tc;
- PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+ if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+ dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
+ goto get_rx_info;
+ }
+done:
return rc;
}
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
+ uint16_t tx_cosq_id)
{
int rc = 0;
uint32_t enables = 0;
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
- req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
+ req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
if (stats_ctx_id != INVALID_STATS_CTX_ID)
enables |=
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
}
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
+ req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
+ }
+
enables |= ctx_enable_flag;
req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
+#define GET_TX_QUEUE_INFO(x) \
+ bp->tx_cos_queue[x].id = resp->queue_id##x; \
+ bp->tx_cos_queue[x].profile = \
+ resp->queue_id##x##_service_profile
+
+#define GET_RX_QUEUE_INFO(x) \
+ bp->rx_cos_queue[x].id = resp->queue_id##x; \
+ bp->rx_cos_queue[x].profile = \
+ resp->queue_id##x##_service_profile
+
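+/*
+ * Token pasting expands these per literal index; e.g. GET_TX_QUEUE_INFO(0)
+ * becomes:
+ *	bp->tx_cos_queue[0].id = resp->queue_id0;
+ *	bp->tx_cos_queue[0].profile = resp->queue_id0_service_profile;
+ * hence the eight explicit invocations in bnxt_hwrm_queue_qportcfg().
+ */
+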
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
+ uint16_t tx_cosq_id);
int bnxt_hwrm_ring_free(struct bnxt *bp,
struct bnxt_ring *ring, uint32_t ring_type);
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
}
rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
- HWRM_NA_SIGNATURE, nq_ring_id);
+ HWRM_NA_SIGNATURE, nq_ring_id, 0);
if (rc)
return rc;
ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
- HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+ HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
if (rc)
return rc;
rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
queue_index, cpr->hw_stats_ctx_id,
- cp_ring->fw_ring_id);
+ cp_ring->fw_ring_id, 0);
if (rc)
return rc;
}
rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
- hw_stats_ctx_id, cp_ring->fw_ring_id);
+ hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
if (rc)
return rc;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
unsigned int idx = i + bp->rx_cp_nr_rings;
+ uint16_t tx_cosq_id = 0;
if (BNXT_HAS_NQ(bp)) {
if (bnxt_alloc_nq_ring(bp, idx, nqr))
if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
goto err_out;
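+ /* Pick the Tx CoS queue for this ring; rings at or beyond the
+ * lossless TC count (max_lltc) fall back to the first entry.
+ */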
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
+ tx_cosq_id = bp->tx_cosq_id[i < bp->max_lltc ? i : 0];
+ else
+ tx_cosq_id = bp->tx_cosq_id[0];
/* Tx ring */
ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
rc = bnxt_hwrm_ring_alloc(bp, ring,
ring_type,
i, cpr->hw_stats_ctx_id,
- cp_ring->fw_ring_id);
+ cp_ring->fw_ring_id,
+ tx_cosq_id);
if (rc)
goto err_out;
ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
- HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+ HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
if (rc)
return rc;
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
case ETH_MQ_RX_VMDQ_ONLY:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
/* FALLTHROUGH */
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
pools = max_pools;
break;
case ETH_MQ_RX_RSS:
- pools = 1;
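+ /* With CoS classification, create one pool (VNIC) per
+ * FW-reported Rx CoS queue; otherwise a single RSS pool.
+ */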
+ pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
break;
default:
PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
uint16_t cos_rule;
uint16_t lb_rule;
uint16_t rx_queue_cnt;
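+ /* CoS queue this VNIC receives from when CoS classification is on */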
+ uint16_t cos_queue_id;
bool vlan_strip;
bool func_default;
bool bd_stall;
*****************/
-/* hwrm_vnic_cfg_input (size:320b/40B) */
+/* hwrm_vnic_cfg_input (size:384b/48B) */
struct hwrm_vnic_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
*/
#define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \
UINT32_C(0x40)
+ /* This bit must be '1' for the queue_id field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID \
+ UINT32_C(0x80)
/* Logical vnic ID */
uint16_t vnic_id;
/*
* be chosen if packet does not match any RSS rules.
*/
uint16_t default_cmpl_ring_id;
+ /*
+ * When specified, only incoming packets classified to the specified CoS
+ * queue ID will arrive on this VNIC. Packet priority to CoS mapping
+ * rules can be specified using HWRM_QUEUE_PRI2COS_CFG. In this mode,
+ * ntuple filters with a VNIC destination are invalid since they
+ * conflict with the CoS to VNIC steering rules.
+ *
+ * If this field is not specified, packet to VNIC steering will be
+ * subject to the standard L2 filter rules and any additional ntuple
+ * filter rules with destination VNIC specified.
+ */
+ uint16_t queue_id;
+ uint8_t unused0[6];
} __attribute__((packed));
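+/*
+ * Illustrative sketch (mirroring what bnxt_hwrm_vnic_cfg() does above):
+ * the new field is honored only when its enable bit is set.
+ *
+ *	req.enables |= rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID);
+ *	req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
+ */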
/* hwrm_vnic_cfg_output (size:128b/16B) */
*/
#define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \
UINT32_C(0x80)
+ /*
+ * When this bit is '1', it indicates that firmware supports the
+ * ability to steer incoming packets from one CoS queue to one
+ * VNIC. This optional feature can then be enabled
+ * using HWRM_VNIC_CFG on any VNIC. This feature is only
+ * available when NVM option "enable_cos_classfication" is set
+ * to 1. If set to '0', firmware does not support this feature.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP \
+ UINT32_C(0x100)
/*
* This field advertises the maximum concurrent TPA aggregations
* supported by the VNIC on new devices that support TPA v2.