X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_ethdev.c;h=65e43a18f9f2449611f879fd22c0ca17216c3a23;hb=78156d38e112b33032eedfada65b0df8b047bc31;hp=8d62b84805e2b0b68e24c5c045ff75231ebc8d31;hpb=b3d95f1817288ca228f09b9164d6d3ff6249b175;p=dpdk.git

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8d62b84805..65e43a18f9 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -29,15 +30,26 @@
 #define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
+#define ICE_ONE_PPS_OUT_ARG       "pps_out"
+#define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
+
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
+uint64_t ice_timestamp_dynflag;
+int ice_timestamp_dynfield_offset = -1;
 
 static const char * const ice_valid_args[] = {
 	ICE_SAFE_MODE_SUPPORT_ARG,
 	ICE_PIPELINE_MODE_SUPPORT_ARG,
 	ICE_PROTO_XTR_ARG,
 	ICE_HW_DEBUG_MASK_ARG,
+	ICE_ONE_PPS_OUT_ARG,
+	ICE_RX_LOW_LATENCY_ARG,
 	NULL
 };
 
+#define PPS_OUT_DELAY_NS  1
+
 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
 	.name = "intel_pmd_dynfield_proto_xtr_metadata",
 	.size = sizeof(uint32_t),
@@ -141,6 +153,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					   struct timespec *timestamp,
+					   uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					   struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -184,9 +208,9 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.tx_queue_start               = ice_tx_queue_start,
 	.tx_queue_stop                = ice_tx_queue_stop,
 	.rx_queue_setup               = ice_rx_queue_setup,
-	.rx_queue_release             = ice_rx_queue_release,
+	.rx_queue_release             = ice_dev_rx_queue_release,
 	.tx_queue_setup               = ice_tx_queue_setup,
-	.tx_queue_release             = ice_tx_queue_release,
+	.tx_queue_release             = ice_dev_tx_queue_release,
 	.dev_infos_get                = ice_dev_info_get,
 	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
 	.link_update                  = ice_link_update,
@@ -224,6 +248,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1104,7 +1135,7 @@ ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
 	if (!vsi || !vsi->mac_num)
 		return -EINVAL;
 
-	TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
+	RTE_TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
 		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
 		if (ret != ICE_SUCCESS) {
 			ret = -EINVAL;
@@ -1115,7 +1146,7 @@ ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
 	if (vsi->vlan_num == 0)
 		return 0;
 
-	TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
+	RTE_TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
 		ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
 		if (ret != ICE_SUCCESS) {
 			ret = -EINVAL;
@@ -1786,6 +1817,125 @@ parse_u64(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+lookup_pps_type(const char *pps_name)
+{
+	static struct {
+		const char *name;
+		enum pps_type type;
+	} pps_type_map[] = {
+		{ "pin", PPS_PIN },
+	};
+
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(pps_type_map); i++) {
+		if (strcmp(pps_name, pps_type_map[i].name) == 0)
+			return pps_type_map[i].type;
+	}
+
+	return -1;
+}
+
+static int
+parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs)
+{
+	const char *str = input;
+	char *end = NULL;
+	uint32_t idx;
+
+	while (isblank(*str))
+		str++;
+
+	if (!isdigit(*str))
+		return -1;
+
+	if (pps_type == PPS_PIN) {
+		idx = strtoul(str, &end, 10);
+		if (end == NULL || idx >= ICE_MAX_PIN_NUM)
+			return -1;
+
+		devargs->pin_idx = idx;
+		devargs->pps_out_ena = 1;
+	}
+
+	while (isblank(*end))
+		end++;
+
+	if (*end != ']')
+		return -1;
+
+	return 0;
+}
+
+static int
+parse_pps_out_parameter(const char *pins, struct ice_devargs *devargs)
+{
+	const char *pin_start;
+	uint32_t idx;
+	int pps_type;
+	char pps_name[32];
+
+	while (isblank(*pins))
+		pins++;
+
+	pins++;
+	while (isblank(*pins))
+		pins++;
+	if (*pins == '\0')
+		return -1;
+
+	for (idx = 0; ; idx++) {
+		if (isblank(pins[idx]) ||
+		    pins[idx] == ':' ||
+		    pins[idx] == '\0')
+			break;
+
+		pps_name[idx] = pins[idx];
+	}
+	pps_name[idx] = '\0';
+	pps_type = lookup_pps_type(pps_name);
+	if (pps_type < 0)
+		return -1;
+
+	pins += idx;
+
+	pins += strcspn(pins, ":");
+	if (*pins++ != ':')
+		return -1;
+	while (isblank(*pins))
+		pins++;
+
+	pin_start = pins;
+
+	while (isblank(*pins))
+		pins++;
+
+	if (parse_pin_set(pin_start, pps_type, devargs) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int
+handle_pps_out_arg(__rte_unused const char *key, const char *value,
+		   void *extra_args)
+{
+	struct ice_devargs *devargs = extra_args;
+
+	if (value == NULL || extra_args == NULL)
+		return -EINVAL;
+
+	if (parse_pps_out_parameter(value, devargs) < 0) {
+		PMD_DRV_LOG(ERR,
+			    "The GPIO pin parameter is wrong : '%s'",
+			    value);
+		return -1;
+	}
+
+	return 0;
+}
+
 static int ice_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct ice_adapter *ad =
@@ -1827,6 +1977,14 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, ICE_ONE_PPS_OUT_ARG,
+				 &handle_pps_out_arg, &ad->devargs);
+	if (ret)
+		goto bail;
+
+	ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
+				 &parse_bool, &ad->devargs.rx_low_latency);
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -2286,6 +2444,9 @@ ice_dev_close(struct rte_eth_dev *dev)
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	int ret;
+	uint32_t val;
+	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
+	uint32_t pin_idx = ad->devargs.pin_idx;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
@@ -2315,6 +2476,16 @@ ice_dev_close(struct rte_eth_dev *dev)
 	rte_free(pf->proto_xtr);
 	pf->proto_xtr = NULL;
 
+	if (ad->devargs.pps_out_ena) {
+		ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
+		ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
+		ICE_WRITE_REG(hw, GLTSYN_TGT_L(pin_idx, timer), 0);
+		ICE_WRITE_REG(hw, GLTSYN_TGT_H(pin_idx, timer), 0);
+
+		val = GLGEN_GPIO_CTL_PIN_DIR_M;
+		ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(pin_idx), val);
+	}
+
 	/* disable uio intr before callback unregister */
 	rte_intr_disable(intr_handle);
 
@@ -2822,9 +2993,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	ETH_RSS_NONFRAG_IPV4_TCP | \
 	ETH_RSS_NONFRAG_IPV6_TCP | \
 	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_FRAG_IPV6)
+	ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
 	if (ret)
@@ -2979,24 +3148,6 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV4) {
-		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_FRAG;
-		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
-		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
-		if (ret)
-			PMD_DRV_LOG(ERR, "%s IPV4_FRAG rss flow fail %d",
-				    __func__, ret);
-	}
-
-	if (rss_hf & ETH_RSS_FRAG_IPV6) {
-		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_FRAG;
-		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
-		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
-		if (ret)
-			PMD_DRV_LOG(ERR, "%s IPV6_FRAG rss flow fail %d",
-				    __func__, ret);
-	}
-
 	pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
 }
 
@@ -3150,8 +3301,9 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
 {
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint32_t val, val_tx;
-	int i;
+	int rx_low_latency, i;
 
+	rx_low_latency = vsi->adapter->devargs.rx_low_latency;
 	for (i = 0; i < nb_queue; i++) {
 		/*do actual bind*/
 		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
@@ -3161,8 +3313,21 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
 
 		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
 			    base_queue + i, msix_vect);
+
 		/* set ITR0 value */
-		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
+		if (rx_low_latency) {
+			/**
+			 * Empirical configuration for optimal real time
+			 * latency reduced interrupt throttling to 2us
+			 */
+			ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
+			ICE_WRITE_REG(hw, QRX_ITR(base_queue + i),
+				      QRX_ITR_NO_EXPR_M);
+		} else {
+			ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
+			ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0);
+		}
+
 		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
 		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
 	}
@@ -3317,6 +3482,49 @@ ice_get_init_link_status(struct rte_eth_dev *dev)
 		pf->init_link_up = true;
 }
 
+static int
+ice_pps_out_cfg(struct ice_hw *hw, int idx, int timer)
+{
+	uint64_t current_time, start_time;
+	uint32_t hi, lo, lo2, func, val;
+
+	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
+	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
+	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
+
+	if (lo2 < lo) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
+	}
+
+	current_time = ((uint64_t)hi << 32) | lo;
+
+	start_time = (current_time + NSEC_PER_SEC) /
+			NSEC_PER_SEC * NSEC_PER_SEC;
+	start_time = start_time - PPS_OUT_DELAY_NS;
+
+	func = 8 + idx + timer * 4;
+	val = GLGEN_GPIO_CTL_PIN_DIR_M |
+	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
+	      GLGEN_GPIO_CTL_PIN_FUNC_M);
+
+	/* Write clkout with half of period value */
+	ICE_WRITE_REG(hw, GLTSYN_CLKO(idx, timer), NSEC_PER_SEC / 2);
+
+	/* Write TARGET time register */
+	ICE_WRITE_REG(hw, GLTSYN_TGT_L(idx, timer), start_time & 0xffffffff);
+	ICE_WRITE_REG(hw, GLTSYN_TGT_H(idx, timer), start_time >> 32);
+
+	/* Write AUX_OUT register */
+	ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(idx, timer),
+		      GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M);
+
+	/* Write GPIO CTL register */
+	ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(idx), val);
+
+	return 0;
+}
+
 static int
 ice_dev_start(struct rte_eth_dev *dev)
 {
@@ -3324,10 +3532,14 @@
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_vsi *vsi = pf->main_vsi;
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	uint16_t nb_rxq = 0;
 	uint16_t nb_txq, i;
 	uint16_t max_frame_size;
 	int mask, ret;
+	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
+	uint32_t pin_idx = ad->devargs.pin_idx;
 
 	/* program Tx queues' context in hardware */
 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
@@ -3398,6 +3610,14 @@ ice_dev_start(struct rte_eth_dev *dev)
 	/* Set the max frame size to HW*/
 	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
 
+	if (ad->devargs.pps_out_ena) {
+		ret = ice_pps_out_cfg(hw, pin_idx, timer);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to configure 1pps out");
+			goto rx_err;
+		}
+	}
+
 	return 0;
 
 	/* stop the started queues if failed to start all queues */
@@ -3475,7 +3695,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			DEV_RX_OFFLOAD_QINQ_STRIP |
 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH;
+			DEV_RX_OFFLOAD_RSS_HASH |
+			DEV_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
 			DEV_TX_OFFLOAD_QINQ_INSERT |
 			DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -5286,6 +5507,184 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret;
+
+	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_TIMESTAMP)) {
+		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
+		return -1;
+	}
+
+	if (hw->func_caps.ts_func_info.src_tmr_owned) {
+		ret = ice_ptp_init_phc(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
+			return -1;
+		}
+
+		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to write PHC increment time value");
+			return -1;
+		}
+	}
+
+	/* Initialize cycle counters for system time/RX/TX timestamp */
+	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->systime_tc.cc_shift = 0;
+	ad->systime_tc.nsec_mask = 0;
+
+	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->rx_tstamp_tc.cc_shift = 0;
+	ad->rx_tstamp_tc.nsec_mask = 0;
+
+	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->tx_tstamp_tc.cc_shift = 0;
+	ad->tx_tstamp_tc.nsec_mask = 0;
+
+	ad->ptp_ena = 1;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp, uint32_t flags)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_rx_queue *rxq;
+	uint32_t ts_high;
+	uint64_t ts_ns, ns;
+
+	rxq = dev->data->rx_queues[flags];
+
+	ts_high = rxq->time_high;
+	ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
+	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint8_t lport;
+	uint64_t ts_ns, ns, tstamp;
+	const uint64_t mask = 0xFFFFFFFF;
+	int ret;
+
+	lport = hw->port_info->lport;
+
+	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
+		return -1;
+	}
+
+	ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
+	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	ad->systime_tc.nsec += delta;
+	ad->rx_tstamp_tc.nsec += delta;
+	ad->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t ns;
+
+	ns = rte_timespec_to_ns(ts);
+
+	ad->systime_tc.nsec = ns;
+	ad->rx_tstamp_tc.nsec = ns;
+	ad->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint32_t hi, lo, lo2;
+	uint64_t time, ns;
+
+	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+	if (lo2 < lo) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	}
+
+	time = ((uint64_t)hi << 32) | lo;
+	ns = rte_timecounter_update(&ad->systime_tc, time);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t val;
+	uint8_t lport;
+
+	lport = hw->port_info->lport;
+
+	ice_clear_phy_tstamp(hw, lport, 0);
+
+	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+	val &= ~GLTSYN_ENA_TSYN_ENA_M;
+	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+	ad->ptp_ena = 0;
+
+	return 0;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
@@ -5320,7 +5719,8 @@ RTE_PMD_REGISTER_PARAM_STRING(net_ice,
 			      ICE_HW_DEBUG_MASK_ARG "=0xXXX"
 			      ICE_PROTO_XTR_ARG "=[queue:]"
 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
-			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
+			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
+			      ICE_RX_LOW_LATENCY_ARG "=<0|1>");
 
 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
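
Usage note (editorial, not part of the patch): the hunks above register the pps_out and rx_low_latency devargs and wire the new ice_timesync_* callbacks into ice_eth_dev_ops, so an application reaches them through the generic rte_eth_timesync_*() API once the Rx timestamp offload is enabled. Below is a minimal application-side sketch under stated assumptions: the port id, queue sizes, and the devargs string (e.g. -a 0000:af:00.0,pps_out='[pin:0]',rx_low_latency=1) are illustrative examples, not values taken from this diff.

/*
 * Editorial sketch: exercises the timesync callbacks added above via the
 * generic ethdev API.  Assumes port 0 is an ice port allow-listed with an
 * (illustrative) devargs string such as
 *   -a 0000:af:00.0,pps_out='[pin:0]',rx_low_latency=1
 */
#include <time.h>
#include <rte_ethdev.h>

static int
ptp_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = {0};
	struct timespec ts;
	int ret;

	/* ice_timesync_enable() refuses to run on a started port that was
	 * configured without the Rx timestamp offload, so request it here. */
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	/* Dispatches to ice_timesync_enable() through ice_eth_dev_ops. */
	ret = rte_eth_timesync_enable(port_id);
	if (ret < 0)
		return ret;

	/* Dispatches to ice_timesync_read_time(), i.e. GLTSYN_TIME_L/H(0). */
	return rte_eth_timesync_read_time(port_id, &ts);
}

For reference, with rx_low_latency=1 the __vsi_queues_bind_intr() hunk above programs GLINT_ITR(0, vect) with 0x1 (the "2us" throttling mentioned in its comment) and sets QRX_ITR_NO_EXPR_M on each Rx queue, instead of the default 0x2 and 0.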