git.droids-corp.org - dpdk.git/commitdiff
net/hns3: remove some blank lines
author Lijun Ou <oulijun@huawei.com>
Mon, 9 Nov 2020 14:29:02 +0000 (22:29 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 13 Nov 2020 18:43:26 +0000 (19:43 +0100)
The static check tools require that blank lines be
arranged properly to keep the code compact. Remove
the unnecessary blank lines to fix the resulting
warnings.

Signed-off-by: Lijun Ou <oulijun@huawei.com>
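
For context, here is a minimal sketch of the pattern such static
checkers flag (hypothetical code, not part of this patch; the helper
names are invented for illustration):

    #include <errno.h>
    #include <stdbool.h>

    /* Invented stand-ins for the illustration. */
    static unsigned int read_head(void) { return 3; }
    static bool head_is_valid(unsigned int head) { return head < 8; }

    static int
    clean_ring(void)
    {
            unsigned int head;

            head = read_head();
            /*
             * Style rule: no blank line between the read above and the
             * check below; the assignment and its validation belong
             * together, keeping the code compact.
             */
            if (!head_is_valid(head))
                    return -EINVAL;

            return 0;
    }
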
drivers/net/hns3/hns3_cmd.c
drivers/net/hns3/hns3_ethdev.c
drivers/net/hns3/hns3_flow.c
drivers/net/hns3/hns3_rxtx.c
drivers/net/hns3/hns3_stats.c

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 4f52ed034d4865ff10df645b7c6f3d9838c2a45b..f58f4f7adcea4d5ef6960cebaebe6b8281172290 100644
@@ -198,7 +198,6 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
        int clean;
 
        head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
-
        if (!is_valid_csq_clean_head(csq, head)) {
                hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
                            csq->next_to_use, csq->next_to_clean);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 79c03894d368c1e91042cad8005432e4b45c0e7e..201137887980a8ec72104932f25d5f9f343d9e23 100644
@@ -238,7 +238,6 @@ hns3_interrupt_handler(void *param)
        hns3_pf_disable_irq0(hw);
 
        event_cause = hns3_check_event_cause(hns, &clearval);
-
        /* vector 0 interrupt is shared with reset and mailbox source events. */
        if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
                hns3_warn(hw, "Received err interrupt");
@@ -3556,9 +3555,7 @@ hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
        for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
                priv = &buf_alloc->priv_buf[i];
                mask = BIT((uint8_t)i);
-
-               if (hw->hw_tc_map & mask &&
-                   hw->dcb_info.hw_pfc_map & mask) {
+               if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
                        /* Reduce the number of pfc TC with private buffer */
                        priv->wl.low = 0;
                        priv->enable = 0;
@@ -3612,7 +3609,6 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw,
 
        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];
-
                priv->enable = 0;
                priv->wl.low = 0;
                priv->wl.high = 0;
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 2fff1578f633225b24d7d8de94e75588fc564349..ee6ec154988e06d71bfb5b5dbdae9bb6240d1248 100644
@@ -209,8 +209,7 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 
        ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
        if (ret) {
-               rte_flow_error_set(error, -ret,
-                                  RTE_FLOW_ERROR_TYPE_HANDLE,
+               rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Read counter fail.");
                return ret;
        }
@@ -547,7 +546,6 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (item->mask) {
                ipv4_mask = item->mask;
-
                if (ipv4_mask->hdr.total_length ||
                    ipv4_mask->hdr.packet_id ||
                    ipv4_mask->hdr.fragment_offset ||
@@ -616,8 +614,8 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (item->mask) {
                ipv6_mask = item->mask;
-               if (ipv6_mask->hdr.vtc_flow ||
-                   ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
+               if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
+                   ipv6_mask->hdr.hop_limits) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
@@ -672,12 +670,10 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (item->mask) {
                tcp_mask = item->mask;
-               if (tcp_mask->hdr.sent_seq ||
-                   tcp_mask->hdr.recv_ack ||
-                   tcp_mask->hdr.data_off ||
-                   tcp_mask->hdr.tcp_flags ||
-                   tcp_mask->hdr.rx_win ||
-                   tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
+               if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
+                   tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
+                   tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
+                   tcp_mask->hdr.tcp_urp) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
@@ -776,7 +772,6 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in SCTP");
-
                if (sctp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
@@ -1069,8 +1064,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 }
 
 static int
-hns3_parse_normal(const struct rte_flow_item *item,
-                 struct hns3_fdir_rule *rule,
+hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct items_step_mngr *step_mngr,
                  struct rte_flow_error *error)
 {
@@ -1331,9 +1325,8 @@ hns3_rss_conf_copy(struct hns3_rss_conf *out,
                .key_len = in->key_len,
                .queue_num = in->queue_num,
        };
-       out->conf.queue =
-               memcpy(out->queue, in->queue,
-                      sizeof(*in->queue) * in->queue_num);
+       out->conf.queue = memcpy(out->queue, in->queue,
+                               sizeof(*in->queue) * in->queue_num);
        if (in->key)
                out->conf.key = memcpy(out->key, in->key, in->key_len);
 
@@ -1783,17 +1776,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 
        flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
        if (flow == NULL) {
-               rte_flow_error_set(error, ENOMEM,
-                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                  "Failed to allocate flow memory");
+               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                  NULL, "Failed to allocate flow memory");
                return NULL;
        }
        flow_node = rte_zmalloc("hns3 flow node",
                                sizeof(struct hns3_flow_mem), 0);
        if (flow_node == NULL) {
-               rte_flow_error_set(error, ENOMEM,
-                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                  "Failed to allocate flow list memory");
+               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                  NULL, "Failed to allocate flow list memory");
                rte_free(flow);
                return NULL;
        }
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 4b88b4692338b8e8ed6ac8b9a449a37686566899..c76e635ff112e0497bbbaa760475fe82c27ccf0b 100644
@@ -1584,7 +1584,6 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
 
        vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
                        RTE_PKTMBUF_HEADROOM);
-
        if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
                return -EINVAL;
 
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index c590647b00637ccc23f6447fb706886e2d30591b..91168ac95ad5a898e9f7d4792f047c8507b4bd81 100644
@@ -679,7 +679,6 @@ hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                        (*count)++;
                }
        }
-
 }
 
 void