#define IAVF_BITS_PER_BYTE 8
+#define IAVF_VLAN_TAG_PCP_OFFSET 13
+
struct iavf_adapter;
struct iavf_rx_queue;
struct iavf_tx_queue;
bool committed;
};
+/* Structure to store the queue-to-TC mapping; queues are contiguous within one TC */
+struct iavf_qtc_map {
+ uint8_t tc;
+ uint16_t start_queue_id;
+ uint16_t queue_count;
+};
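
For illustration, a minimal standalone sketch of how such a map lays queues out back-to-back per TC, and how the TC for a given queue id is recovered by a linear scan (the TC count, per-TC queue counts and queue id below are made up):

#include <stdint.h>
#include <stdio.h>

struct qtc_map { /* mirrors iavf_qtc_map; standalone for the example */
    uint8_t tc;
    uint16_t start_queue_id;
    uint16_t queue_count;
};

int
main(void)
{
    /* hypothetical layout: TC0 gets queues 0-3, TC1 gets queues 4-7 */
    uint16_t counts[2] = {4, 4};
    struct qtc_map map[2];
    uint16_t index = 0, qid = 5;
    int i;

    for (i = 0; i < 2; i++) {
        map[i].tc = i;
        map[i].start_queue_id = index;
        map[i].queue_count = counts[i];
        index += counts[i];
    }

    /* same lookup the driver performs at Tx queue setup */
    for (i = 0; i < 2; i++)
        if (qid >= map[i].start_queue_id &&
            qid < map[i].start_queue_id + map[i].queue_count)
            break;

    printf("queue %u -> TC%d\n", qid, i); /* prints: queue 5 -> TC1 */
    return 0;
}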
+
/* Structure to store private data specific for VF instance. */
struct iavf_info {
uint16_t num_queue_pairs;
bool lv_enabled;
struct virtchnl_qos_cap_list *qos_cap;
+ struct iavf_qtc_map *qtc_map;
struct iavf_tm_conf tm_conf;
};
ad->tx_vec_allowed = false;
}
+ if ((vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) &&
+ vf->tm_conf.committed) {
+ int tc;
+
+ for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
+ if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
+ txq->queue_id < (vf->qtc_map[tc].start_queue_id +
+ vf->qtc_map[tc].queue_count))
+ break;
+ }
+ if (tc >= vf->qos_cap->num_elem) {
+ PMD_INIT_LOG(ERR, "no TC mapping found for queue %u", txq->queue_id);
+ return -EINVAL;
+ }
+ txq->tc = tc;
+ }
+
return 0;
}
return nb_tx;
}
+/* Check whether the VLAN user priority (PCP) of a packet is allowed on
+ * the TC that its Tx queue is mapped to.
+ */
+static int
+iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t up;
+
+ up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
+
+ if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
+ PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot be transmitted on queue %u",
+ up, txq->queue_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
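
For context, the shift by IAVF_VLAN_TAG_PCP_OFFSET works because the PCP sits in bits 15:13 of the 16-bit VLAN TCI (PCP:3, DEI:1, VID:12). A small self-contained sketch of the extraction and the per-TC priority bitmap test used above (the TCI value and the bitmap are made up):

#include <assert.h>
#include <stdint.h>

#define PCP_SHIFT 13 /* same role as IAVF_VLAN_TAG_PCP_OFFSET */

int
main(void)
{
    uint16_t vlan_tci = 0xA005; /* PCP = 5, DEI = 0, VID = 5 */
    uint16_t up = vlan_tci >> PCP_SHIFT;
    /* hypothetical tc_prio bitmap: this TC accepts UP 4 and UP 5 */
    uint8_t tc_prio = (1u << 4) | (1u << 5);

    assert(up == 5);
    assert(tc_prio & (1u << up)); /* UP 5 is allowed, check passes */
    return 0;
}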
+
/* TX prep functions */
uint16_t
iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
int i, ret;
uint64_t ol_flags;
struct rte_mbuf *m;
+ struct iavf_tx_queue *txq = tx_queue;
+ struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
rte_errno = -ret;
return i;
}
+
+ if ((vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) &&
+ (ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN))) {
+ ret = iavf_check_vlan_up2tc(txq, m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
}
return i;
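
From the application side this check surfaces through rte_eth_tx_prepare(); a hedged usage sketch (the helper name and IDs are placeholders, error handling trimmed):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

/* Hypothetical helper: run Tx prep before the burst so a packet whose
 * VLAN UP does not match the queue's TC is caught and reported.
 */
static uint16_t
send_burst_checked(uint16_t port_id, uint16_t queue_id,
                   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
                                          pkts, nb_pkts);

    if (nb_prep < nb_pkts) /* pkts[nb_prep] failed a prep check */
        printf("pkt %u rejected: %s\n", nb_prep,
               rte_strerror(rte_errno));

    return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}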
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
uint8_t vlan_flag;
+ uint8_t tc; /* TC to which this Tx queue is mapped */
};
/* Offload features */
struct virtchnl_queue_tc_mapping *q_tc_mapping;
struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
struct iavf_tm_node *tm_node;
+ struct iavf_qtc_map *qtc_map;
uint16_t size;
int index = 0, node_committed = 0;
int i, ret_val = IAVF_SUCCESS;
q_tc_mapping->vsi_id = vf->vsi.vsi_id;
q_tc_mapping->num_tc = vf->qos_cap->num_elem;
q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
+
TAILQ_FOREACH(tm_node, queue_list, node) {
if (tm_node->tc >= q_tc_mapping->num_tc) {
PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
goto fail_clear;
}
}

+ /* store the queue TC mapping info */
+ qtc_map = rte_zmalloc("qtc_map",
+ sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
+ if (!qtc_map)
+ return IAVF_ERR_NO_MEMORY;
+
for (i = 0; i < q_tc_mapping->num_tc; i++) {
q_tc_mapping->tc[i].req.start_queue_id = index;
index += q_tc_mapping->tc[i].req.queue_count;
+ qtc_map[i].tc = i;
+ qtc_map[i].start_queue_id =
+ q_tc_mapping->tc[i].req.start_queue_id;
+ qtc_map[i].queue_count = q_tc_mapping->tc[i].req.queue_count;
}
ret_val = iavf_set_q_tc_map(dev, q_tc_mapping, size);
if (ret_val) {
+ rte_free(qtc_map);
goto fail_clear;
}
+ vf->qtc_map = qtc_map;
vf->tm_conf.committed = true;
return ret_val;
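
To round this off, the commit path above is reached through the generic rte_tm API; a rough sketch of the calls an application might issue to map queues 0-3 to TC0 and 4-7 to TC1 before starting the port (node IDs, levels and weights are illustrative, return codes unchecked for brevity):

#include <rte_tm.h>

static int
setup_qos_hierarchy(uint16_t port_id)
{
    struct rte_tm_node_params np = {0};
    struct rte_tm_error err = {0};
    uint32_t root = 100, tc0 = 200, tc1 = 201, qid;

    /* root at level 0, one node per TC at level 1 */
    rte_tm_node_add(port_id, root, RTE_TM_NODE_ID_NULL, 0, 1, 0, &np, &err);
    rte_tm_node_add(port_id, tc0, root, 0, 1, 1, &np, &err);
    rte_tm_node_add(port_id, tc1, root, 0, 1, 1, &np, &err);

    /* queue nodes at level 2; the node id doubles as the queue id */
    for (qid = 0; qid < 8; qid++)
        rte_tm_node_add(port_id, qid, qid < 4 ? tc0 : tc1,
                        0, 1, 2, &np, &err);

    /* triggers the driver's hierarchy commit shown above */
    return rte_tm_hierarchy_commit(port_id, 1, &err);
}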