struct ena_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
- unsigned long flags;
+ unsigned long flags = 0;
struct ena_comp_ctx *comp_ctx;
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);

struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags;
+ unsigned long flags = 0;
int ret = 0;
ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,

volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
u32 mmio_read_reg, ret;
- unsigned long flags;
+ unsigned long flags = 0;
int i;
ENA_MIGHT_SLEEP();

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
ena_dev->admin_queue.running_state = state;
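
The five hunks above all make the same change: the `flags` local handed to the ENA_SPINLOCK_LOCK/UNLOCK macros is now zero-initialized. Presumably this silences "may be used uninitialized" and unused-variable diagnostics on ports whose lock macros never assign `flags`; on ports that do write it, the initializer is harmless. A minimal sketch of the idiom, with pthread standing in for a hypothetical port layer (not the driver's real one):

    #include <pthread.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in port layer: "flags" is accepted for source compatibility with
     * irqsave-style locking but ignored on this port, so initializing the
     * caller's local is the only thing that keeps it defined everywhere. */
    #define ENA_SPINLOCK_LOCK(lock, flags) \
            do { (void)(flags); pthread_mutex_lock(&(lock)); } while (0)
    #define ENA_SPINLOCK_UNLOCK(lock, flags) \
            do { (void)(flags); pthread_mutex_unlock(&(lock)); } while (0)

    static void locked_example(void)
    {
            unsigned long flags = 0;    /* zero-initialized, as in the patch */

            ENA_SPINLOCK_LOCK(q_lock, flags);
            /* ... critical section ... */
            ENA_SPINLOCK_UNLOCK(q_lock, flags);
    }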
if (unlikely(rc))
return rc;
- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
if (func)
*func = rss->hash_func;
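
The added cast addresses enum typing: `selected_func` comes out of the admin completion as a plain integer field, while `rss->hash_func` is declared as `enum ena_admin_hash_functions`. C++ rejects the implicit int-to-enum conversion outright, and some stricter C compilers warn about mixing an enumerated type with another type (ICC's diagnostic of that name, for example). A reduced sketch with hypothetical names and values:

    #include <stdint.h>

    enum hash_function { HASH_TOEPLITZ = 1, HASH_CRC32 = 2 };  /* hypothetical */

    struct rss_state {
            enum hash_function hash_func;
    };

    static void store_hash_func(struct rss_state *rss, uint32_t selected_func)
    {
            /* Explicit cast: implicit uint32_t -> enum conversion is an error
             * in C++ and a warning under stricter C compilers. */
            rss->hash_func = (enum hash_function)selected_func;
    }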
int rc, i;
/* Get the supported hash input */
- rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+ rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
if (unlikely(rc))
return rc;

/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
return rc;
}

/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
return 0;
}
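
The `(enum ena_admin_flow_hash_proto)0` casts in the three hunks above are the same fix applied to a literal: a bare `0` passed where the parameter is enum-typed triggers the identical int/enum-mixing diagnostic, so the conversion is spelled out even for zero.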
*/
return;
- curr_moder_idx = *moder_tbl_idx;
+ curr_moder_idx = (enum ena_intr_moder_level)*moder_tbl_idx;
if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
return;
if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval))
- new_moder_idx = curr_moder_idx + 1;
+ new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
} else {
pred_moder_entry = &intr_moder_tbl[curr_moder_idx - 1];
if ((pkts <= pred_moder_entry->pkts_per_interval) ||
(bytes <= pred_moder_entry->bytes_per_interval))
- new_moder_idx = curr_moder_idx - 1;
+ new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx - 1);
else if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval)) {
if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
- new_moder_idx = curr_moder_idx + 1;
+ new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
}
}
new_moder_entry = &intr_moder_tbl[new_moder_idx];
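
Here the casts are needed because C's usual arithmetic conversions promote an enum operand to `int`: `curr_moder_idx + 1` and `curr_moder_idx - 1` are `int` expressions, so assigning them to the `enum ena_intr_moder_level` variable draws the same mixing warning (as does reading the index through the plain-integer `*moder_tbl_idx`). A compact sketch of the step-up pattern, with hypothetical level names:

    enum moder_level {
            LEVEL_LOWEST,
            LEVEL_MID,
            LEVEL_HIGHEST,
            LEVEL_COUNT
    };

    static enum moder_level step_up(enum moder_level cur)
    {
            /* cur + 1 has type int; the cast restores the enum type. */
            return (cur < LEVEL_HIGHEST) ? (enum moder_level)(cur + 1) : cur;
    }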
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
- ena_rx_ctx->l3_proto = cdesc->status &
- ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
- ena_rx_ctx->l4_proto =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK);
+ ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index)
+ ((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
ena_rx_ctx->l3_csum_err =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
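
Same pattern one last time, now in the Rx path: masking and shifting `cdesc->status` yields an integer, and the result is cast explicitly before landing in the enum-typed `l3_proto`/`l4_proto` fields. A reduced sketch (mask, shift, and protocol values are illustrative, not the real descriptor layout):

    #include <stdint.h>

    enum l4_proto { L4_UNKNOWN = 0, L4_TCP = 1, L4_UDP = 2 };  /* illustrative */

    #define L4_PROTO_MASK  0x1F0u
    #define L4_PROTO_SHIFT 4

    static enum l4_proto decode_l4(uint32_t status)
    {
            /* The mask-and-shift result is uint32_t; the cast makes the
             * int -> enum conversion explicit for strict compilers. */
            return (enum l4_proto)((status & L4_PROTO_MASK) >> L4_PROTO_SHIFT);
    }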