diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 1463f5f..aae6872 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -413,19 +413,21 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
        io_cq->bus = ena_dev->bus;
 
-       ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
-                       size,
-                       io_cq->cdesc_addr.virt_addr,
-                       io_cq->cdesc_addr.phys_addr,
-                       io_cq->cdesc_addr.mem_handle,
-                       ctx->numa_node,
-                       prev_node);
+       ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
+                                           size,
+                                           io_cq->cdesc_addr.virt_addr,
+                                           io_cq->cdesc_addr.phys_addr,
+                                           io_cq->cdesc_addr.mem_handle,
+                                           ctx->numa_node,
+                                           prev_node,
+                                           ENA_CDESC_RING_SIZE_ALIGNMENT);
        if (!io_cq->cdesc_addr.virt_addr) {
-               ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
-                                      size,
-                                      io_cq->cdesc_addr.virt_addr,
-                                      io_cq->cdesc_addr.phys_addr,
-                                      io_cq->cdesc_addr.mem_handle);
+               ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
+                                              size,
+                                              io_cq->cdesc_addr.virt_addr,
+                                              io_cq->cdesc_addr.phys_addr,
+                                              io_cq->cdesc_addr.mem_handle,
+                                              ENA_CDESC_RING_SIZE_ALIGNMENT);
        }
 
        if (!io_cq->cdesc_addr.virt_addr) {
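
The completion-descriptor ring is now allocated with an explicit alignment, first on the requested NUMA node and then anywhere as a fallback. A minimal sketch of how such an aligned DMA allocation can be expressed with DPDK's memzone API (illustrative only; the real ENA_MEM_ALLOC_COHERENT_*_ALIGNED macros are defined per platform in ena_plat_dpdk.h, and example_alloc_cdesc_ring() is a hypothetical helper):

/* Illustrative sketch, not the macro body from ena_plat_dpdk.h: reserve an
 * aligned, IOVA-contiguous DMA buffer on the requested socket and fall back
 * to any socket, mirroring the NODE/plain fallback above.
 */
#include <rte_memzone.h>

static const struct rte_memzone *
example_alloc_cdesc_ring(const char *name, size_t size, int socket_id,
			 unsigned int align)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned(name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, align);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(name, size, SOCKET_ID_ANY,
						 RTE_MEMZONE_IOVA_CONTIG, align);
	return mz;
}
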
@@ -1062,23 +1064,30 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
                                      feature_ver);
 }
 
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+       return ena_dev->rss.hash_func;
+}
+
 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
 {
        struct ena_admin_feature_rss_flow_hash_control *hash_key =
                (ena_dev->rss).hash_key;
 
        ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
-       /* The key is stored in the device in uint32_t array
-        * as well as the API requires the key to be passed in this
-        * format. Thus the size of our array should be divided by 4
+       /* The key buffer is stored in the device in an array of
+        * uint32 elements.
         */
-       hash_key->keys_num = sizeof(hash_key->key) / sizeof(uint32_t);
+       hash_key->keys_num = ENA_ADMIN_RSS_KEY_PARTS;
 }
 
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
 {
        struct ena_rss *rss = &ena_dev->rss;
 
+       if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
+               return ENA_COM_UNSUPPORTED;
+
        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*rss->hash_key),
                               rss->hash_key,
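
The old expression sizeof(hash_key->key) / sizeof(uint32_t) and the new ENA_ADMIN_RSS_KEY_PARTS constant describe the same quantity: the device consumes the RSS key as 32-bit words, so keys_num counts u32 elements rather than bytes. A self-contained sanity check of that relationship, assuming the usual 10-word (40-byte) ENA key layout (the value of ENA_ADMIN_RSS_KEY_PARTS is an assumption here, redefined locally for the example):

#include <assert.h>
#include <stdint.h>

#define ENA_ADMIN_RSS_KEY_PARTS 10U	/* assumed value, normally from ena_admin_defs.h */

int main(void)
{
	/* Mirrors hash_key->key, which the device reads as u32 words. */
	uint32_t key[ENA_ADMIN_RSS_KEY_PARTS];

	/* keys_num (in words) << 2 is the byte count later copied out by
	 * ena_com_get_hash_key().
	 */
	assert(sizeof(key) / sizeof(uint32_t) == ENA_ADMIN_RSS_KEY_PARTS);
	assert((ENA_ADMIN_RSS_KEY_PARTS << 2) == 40U);
	return 0;
}
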
@@ -1694,7 +1703,7 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
        ena_dev->admin_queue.polling = polling;
 }
 
-bool ena_com_get_admin_polling_mode(struct ena_com_dev * ena_dev)
+bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
 {
        return ena_dev->admin_queue.polling;
 }
@@ -2408,15 +2417,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 }
 
 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
-                             enum ena_admin_hash_functions *func,
-                             u8 *key)
+                             enum ena_admin_hash_functions *func)
 {
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
-       struct ena_admin_feature_rss_flow_hash_control *hash_key =
-               rss->hash_key;
        int rc;
 
+       if (unlikely(!func))
+               return ENA_COM_INVAL;
+
        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_FUNCTION,
                                    rss->hash_key_dma_addr,
@@ -2424,13 +2433,20 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
        if (unlikely(rc))
                return rc;
 
-       /* ENA_FFS returns 1 in case the lsb is set */
+       /* ENA_FFS() returns 1 in case the lsb is set */
        rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
        if (rss->hash_func)
                rss->hash_func--;
 
-       if (func)
-               *func = rss->hash_func;
+       *func = rss->hash_func;
+
+       return 0;
+}
+
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
+{
+       struct ena_admin_feature_rss_flow_hash_control *hash_key =
+               ena_dev->rss.hash_key;
 
        if (key)
                memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
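
With the key lookup split out of ena_com_get_hash_function(), a caller now fetches the hash function and the key in two steps. A usage sketch under the assumption that the caller provides a 40-byte key buffer (example_read_rss_config() is a hypothetical helper, not part of this patch):

/* Usage sketch only: read the active hash function, then the key. */
static int example_read_rss_config(struct ena_com_dev *ena_dev,
				   enum ena_admin_hash_functions *func,
				   u8 *key /* at least 40 bytes */)
{
	int rc;

	rc = ena_com_get_hash_function(ena_dev, func);
	if (unlikely(rc))
		return rc;

	return ena_com_get_hash_key(ena_dev, key);
}
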
@@ -2708,12 +2724,16 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
        if (unlikely(rc))
                goto err_indr_tbl;
 
+       /* The following function might return unsupported in case the
+        * device doesn't support setting the key / hash function. We can safely
+        * ignore this error and have indirection table support only.
+        */
        rc = ena_com_hash_key_allocate(ena_dev);
-       if (unlikely(rc))
+       if (likely(!rc))
+               ena_com_hash_key_fill_default_key(ena_dev);
+       else if (rc != ENA_COM_UNSUPPORTED)
                goto err_hash_key;
 
-       ena_com_hash_key_fill_default_key(ena_dev);
-
        rc = ena_com_hash_ctrl_init(ena_dev);
        if (unlikely(rc))
                goto err_hash_ctrl;
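
In other words, a device that lacks ENA_ADMIN_RSS_HASH_FUNCTION no longer aborts RSS initialization; only genuine allocation failures unwind. The snippet below restates the new control flow with the policy spelled out in comments:

/* Error policy of the hunk above (descriptive comments added, logic identical):
 *   rc == 0                   -> key allocated, program the default key
 *   rc == ENA_COM_UNSUPPORTED -> no key support, keep indirection-table-only RSS
 *   any other rc              -> genuine failure, unwind via err_hash_key
 */
rc = ena_com_hash_key_allocate(ena_dev);
if (likely(!rc))
	ena_com_hash_key_fill_default_key(ena_dev);
else if (rc != ENA_COM_UNSUPPORTED)
	goto err_hash_key;
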
@@ -2940,8 +2960,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
                            struct ena_admin_feature_llq_desc *llq_features,
                            struct ena_llq_configurations *llq_default_cfg)
 {
+       struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
        int rc;
-       struct ena_com_llq_info *llq_info = &(ena_dev->llq_info);;
 
        if (!llq_features->max_llq_num) {
                ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -2955,7 +2975,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
        ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
                (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
 
-       if (ena_dev->tx_max_header_size == 0) {
+       if (unlikely(ena_dev->tx_max_header_size == 0)) {
                ena_trc_err("the size of the LLQ entry is smaller than needed\n");
                return -EINVAL;
        }
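
For a concrete sense of the check above, assume the common defaults of a 128-byte LLQ descriptor list entry, two descriptors placed before the packet header, and 16-byte TX descriptors (illustrative values, not taken from this diff); the remaining header room works out as follows:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t desc_list_entry_size = 128;	/* assumed default LLQ entry size */
	const uint32_t descs_before_header = 2;		/* assumed default */
	const uint32_t tx_desc_size = 16;		/* sizeof(struct ena_eth_io_tx_desc) */

	/* Same computation as ena_dev->tx_max_header_size above. */
	const uint32_t tx_max_header_size =
		desc_list_entry_size - descs_before_header * tx_desc_size;

	/* 128 - 2 * 16 = 96 bytes left for the packet header; a result of 0
	 * would trip the unlikely() error path in ena_com_config_dev_mode().
	 */
	assert(tx_max_header_size == 96);
	return 0;
}
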