0x0, io_sq->llq_info.desc_list_entry_size);
io_sq->llq_buf_ctrl.descs_left_in_line =
io_sq->llq_info.descs_num_before_header;
+ io_sq->disable_meta_caching =
+ io_sq->llq_info.disable_meta_caching;
if (io_sq->llq_info.max_entries_in_tx_burst > 0)
io_sq->entries_in_tx_burst_left =
cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+ if (llq_info->disable_meta_caching)
+ cmd.u.llq.accel_mode.u.set.enabled_flags |=
+ BIT(ENA_ADMIN_DISABLE_META_CACHING);
+
+ if (llq_info->max_entries_in_tx_burst)
+ cmd.u.llq.accel_mode.u.set.enabled_flags |=
+ BIT(ENA_ADMIN_LIMIT_TX_BURST);
+
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
}
+ /* Check if the accelerated queue is supported */
+ llq_info->disable_meta_caching =
+ llq_features->accel_mode.u.get.supported_flags &
+ BIT(ENA_ADMIN_DISABLE_META_CACHING);
- llq_info->max_entries_in_tx_burst =
- (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+ if (llq_features->accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+ llq_info->max_entries_in_tx_burst =
+ llq_features->accel_mode.u.get.max_tx_burst_size /
+ llq_default_cfg->llq_ring_entry_size_value;
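As a quick sanity check on the arithmetic above, a minimal standalone sketch; the 1024-byte burst limit and 128-byte ring entry size are made-up illustrative values, not numbers taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs: the burst byte budget reported by the
	 * device in accel_mode.u.get.max_tx_burst_size, and the
	 * driver's configured LLQ ring entry size in bytes.
	 */
	uint16_t max_tx_burst_size = 1024;
	uint16_t llq_ring_entry_size_value = 128;

	/* Same conversion as in the diff: bytes per burst divided by
	 * bytes per entry gives the entry budget per burst.
	 */
	uint16_t max_entries_in_tx_burst =
		max_tx_burst_size / llq_ring_entry_size_value;

	/* Prints 8: up to eight LLQ entries fit in one tx burst. */
	printf("max_entries_in_tx_burst = %u\n", max_entries_in_tx_burst);
	return 0;
}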
rc = ena_com_set_llq(ena_dev);
if (rc)
u16 descs_num_before_header;
u16 descs_per_entry;
u16 max_entries_in_tx_burst;
+ bool disable_meta_caching;
};
struct ena_com_io_sq {
enum queue_direction direction;
enum ena_admin_placement_policy_type mem_queue_type;
+ bool disable_meta_caching;
+
u32 msix_vector;
struct ena_com_tx_meta cached_tx_meta;
struct ena_com_llq_info llq_info;
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*/
ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
};
+enum ena_admin_accel_mode_feat {
+ ENA_ADMIN_DISABLE_META_CACHING = 0,
+ ENA_ADMIN_LIMIT_TX_BURST = 1,
+};
+
+struct ena_admin_accel_mode_get {
+ /* bit field of enum ena_admin_accel_mode_feat */
+ uint16_t supported_flags;
+
+ /* maximum burst size between two doorbells. The size is in bytes */
+ uint16_t max_tx_burst_size;
+};
+
+struct ena_admin_accel_mode_set {
+ /* bit field of enum ena_admin_accel_mode_feat */
+ uint16_t enabled_flags;
+
+ uint16_t reserved;
+};
+
+struct ena_admin_accel_mode_req {
+ union {
+ uint32_t raw[2];
+
+ struct ena_admin_accel_mode_get get;
+
+ struct ena_admin_accel_mode_set set;
+ } u;
+};
+
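To illustrate how the get/set halves of ena_admin_accel_mode_req are meant to interact, here is a self-contained sketch; the BIT() macro definition and the main() harness are assumptions for illustration (the driver gets BIT() from its platform headers), and the flag values are made up:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n)) /* assumed stand-in for the driver's BIT() */

enum ena_admin_accel_mode_feat {
	ENA_ADMIN_DISABLE_META_CACHING = 0,
	ENA_ADMIN_LIMIT_TX_BURST = 1,
};

struct ena_admin_accel_mode_get {
	uint16_t supported_flags;
	uint16_t max_tx_burst_size;
};

struct ena_admin_accel_mode_set {
	uint16_t enabled_flags;
	uint16_t reserved;
};

struct ena_admin_accel_mode_req {
	union {
		uint32_t raw[2];
		struct ena_admin_accel_mode_get get;
		struct ena_admin_accel_mode_set set;
	} u;
};

int main(void)
{
	/* Device side: report both accel features plus a burst limit. */
	struct ena_admin_accel_mode_req feat = { { { 0, 0 } } };

	feat.u.get.supported_flags = BIT(ENA_ADMIN_DISABLE_META_CACHING) |
				     BIT(ENA_ADMIN_LIMIT_TX_BURST);
	feat.u.get.max_tx_burst_size = 1024;

	/* Driver side: answer through u.set, enabling only what the
	 * device advertised, as the ena_com_set_llq() hunk above does.
	 */
	struct ena_admin_accel_mode_req cmd = { { { 0, 0 } } };

	if (feat.u.get.supported_flags & BIT(ENA_ADMIN_DISABLE_META_CACHING))
		cmd.u.set.enabled_flags |= BIT(ENA_ADMIN_DISABLE_META_CACHING);
	if (feat.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		cmd.u.set.enabled_flags |= BIT(ENA_ADMIN_LIMIT_TX_BURST);

	printf("enabled_flags = 0x%x\n", (unsigned)cmd.u.set.enabled_flags);
	return 0;
}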
struct ena_admin_feature_llq_desc {
uint32_t max_llq_num;
/* the stride control the driver selected to use */
uint16_t descriptors_stride_ctrl_enabled;
- /* Maximum size in bytes taken by llq entries in a single tx burst.
- * Set to 0 when there is no such limit.
+ /* reserved */
+ uint32_t reserved1;
+
+ /* Accelerated low latency queues requirements. The driver must
+ * support these requirements in order to use the accelerated LLQ
*/
- uint32_t max_tx_burst_size;
+ struct ena_admin_accel_mode_req accel_mode;
};
struct ena_admin_queue_ext_feature_fields {
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*/
return count;
}
-static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_meta *ena_meta)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
- struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
meta_desc = get_sq_desc(io_sq);
memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
- meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
meta_desc->len_ctrl |= (io_sq->phase <<
ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
meta_desc->word2 |= ena_meta->l3_hdr_len &
ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
- meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ return ena_com_sq_update_tail(io_sq);
+}
- /* Cached the meta desc */
- memcpy(&io_sq->cached_tx_meta, ena_meta,
- sizeof(struct ena_com_tx_meta));
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ bool *have_meta)
+{
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
- return ena_com_sq_update_tail(io_sq);
+ /* When disable meta caching is set, don't bother to save the meta and
+ * compare it to the stored version; just create the meta.
+ */
+ if (io_sq->disable_meta_caching) {
+ if (unlikely(!ena_tx_ctx->meta_valid))
+ return ENA_COM_INVAL;
+
+ *have_meta = true;
+ return ena_com_create_meta(io_sq, ena_meta);
+ } else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
+ *have_meta = true;
+ /* Cache the meta desc */
+ memcpy(&io_sq->cached_tx_meta, ena_meta,
+ sizeof(struct ena_com_tx_meta));
+ return ena_com_create_meta(io_sq, ena_meta);
+ } else {
+ *have_meta = false;
+ return ENA_COM_OK;
+ }
}
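The branch structure above boils down to the following reduced sketch; the struct layout is trimmed to one field and ena_com_meta_desc_changed is stubbed with a plain memcmp, which is an assumption standing in for the driver's real comparison:

#include <stdbool.h>
#include <string.h>

struct tx_meta { int mss; };	/* trimmed stand-in for ena_com_tx_meta */

struct io_sq {
	bool disable_meta_caching;
	struct tx_meta cached_tx_meta;
};

/* Stand-in for building the meta descriptor and bumping the tail. */
static int create_meta(struct io_sq *sq, const struct tx_meta *meta)
{
	(void)sq;
	(void)meta;
	return 0;
}

static int create_and_store_meta(struct io_sq *sq, const struct tx_meta *meta,
				 bool meta_valid, bool *have_meta)
{
	if (sq->disable_meta_caching) {
		/* No cache to fall back on: every packet must carry
		 * valid meta and always gets a meta descriptor.
		 */
		if (!meta_valid)
			return -1;
		*have_meta = true;
		return create_meta(sq, meta);
	}

	/* Cached path: emit a meta descriptor only when it changed,
	 * and refresh the cache when it did.
	 */
	if (meta_valid &&
	    memcmp(&sq->cached_tx_meta, meta, sizeof(*meta)) != 0) {
		*have_meta = true;
		sq->cached_tx_meta = *meta;
		return create_meta(sq, meta);
	}

	*have_meta = false;
	return 0;
}

int main(void)
{
	struct io_sq sq = { .disable_meta_caching = true };
	struct tx_meta meta = { .mss = 1400 };
	bool have_meta;

	return create_and_store_meta(&sq, &meta, true, &have_meta);
}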
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
if (unlikely(rc))
return rc;
- have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
- ena_tx_ctx);
- if (have_meta) {
- rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
- if (unlikely(rc))
- return rc;
+ rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
+ if (unlikely(rc)) {
+ ena_trc_err("Failed to create and store tx meta desc\n");
+ return rc;
}
/* If the caller doesn't want to send packets */