struct bnxt *bp = eth_dev->data->dev_private;
int rc = 0;
+ if (!BNXT_SINGLE_PF(bp))
+ return -ENOTSUP;
+
if (!bp->link_info->link_up)
rc = bnxt_set_hwrm_link_config(bp, true);
if (!rc)
{
struct bnxt *bp = eth_dev->data->dev_private;
+ if (!BNXT_SINGLE_PF(bp))
+ return -ENOTSUP;
+
eth_dev->data->dev_link.link_status = 0;
bnxt_set_hwrm_link_config(bp, false);
bp->link_info->link_up = 0;
}
}
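+/* Alarm callback: refreshes ptp->current_time, then re-arms itself to fire again in one second. */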
+static void bnxt_ptp_get_current_time(void *arg)
+{
+ struct bnxt *bp = arg;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return;
+
+ if (!ptp)
+ return;
+
+ bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
+ &ptp->current_time);
+
+ rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n");
+ bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
+ }
+}
+
+static int bnxt_schedule_ptp_alarm(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ int rc;
+
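+ /* Nothing to do if the one-second alarm is already armed. */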
+ if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED)
+ return 0;
+
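+ /* Prime the cached time now; the recurring alarm keeps it fresh. */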
+ bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
+ &ptp->current_time);
+
+ rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
+ return rc;
+}
+
+static void bnxt_cancel_ptp_alarm(struct bnxt *bp)
+{
+ if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) {
+ rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp);
+ bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
+ }
+}
+
+static void bnxt_ptp_stop(struct bnxt *bp)
+{
+ bnxt_cancel_ptp_alarm(bp);
+ bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
+}
+
+static int bnxt_ptp_start(struct bnxt *bp)
+{
+ int rc;
+
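+ /* Arm the recurring alarm; timesync is marked active only if arming succeeds. */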
+ rc = bnxt_schedule_ptp_alarm(bp);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n");
+ } else {
+ bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
+ bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
+ }
+
+ return rc;
+}
+
static int bnxt_dev_stop(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
bnxt_cancel_fw_health_check(bp);
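+ /* Stop the one-second PTP alarm while the port is going down. */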
+ if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
+ bnxt_cancel_ptp_alarm(bp);
+
/* Do not bring link down during reset recovery */
if (!is_bnxt_in_error(bp)) {
bnxt_dev_set_link_down_op(eth_dev);
bnxt_schedule_fw_health_check(bp);
+ if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
+ bnxt_schedule_ptp_alarm(bp);
+
return 0;
error:
rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
bp->rx_mem_zone = NULL;
- bnxt_hwrm_free_vf_info(bp);
+ bnxt_free_vf_info(bp);
rte_free(bp->grp_info);
bp->grp_info = NULL;
}
bnxt_del_dflt_mac_filter(bp, vnic);
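+ /* Free the VNIC's HW context(s) before releasing the VNIC itself. */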
+ rc = bnxt_hwrm_vnic_ctx_free(bp, vnic);
+ if (rc)
+ return rc;
+
rc = bnxt_hwrm_vnic_free(bp, vnic);
if (rc)
return rc;
static int
bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
{
- struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
- struct bnxt_rx_ring_info *rxr;
+ struct bnxt_rx_queue *rxq = rx_queue;
struct bnxt_cp_ring_info *cpr;
- struct rte_mbuf *rx_buf;
+ struct bnxt_rx_ring_info *rxr;
+ uint32_t desc, raw_cons;
+ struct bnxt *bp = rxq->bp;
struct rx_pkt_cmpl *rxcmp;
- uint32_t cons, cp_cons;
int rc;
- if (!rxq)
- return -EINVAL;
-
- rc = is_bnxt_in_error(rxq->bp);
+ rc = is_bnxt_in_error(bp);
if (rc)
return rc;
- cpr = rxq->cp_ring;
- rxr = rxq->rx_ring;
-
if (offset >= rxq->nb_rx_desc)
return -EINVAL;
- cons = RING_CMP(cpr->cp_ring_struct, offset);
- cp_cons = cpr->cp_raw_cons;
- rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+ rxr = rxq->rx_ring;
+ cpr = rxq->cp_ring;
- if (cons > cp_cons) {
- if (CMPL_VALID(rxcmp, cpr->valid))
- return RTE_ETH_RX_DESC_DONE;
- } else {
- if (CMPL_VALID(rxcmp, !cpr->valid))
+ /*
+ * For the vector receive case, the completion at the requested
+ * offset can be indexed directly.
+ */
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+ if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
+ struct rx_pkt_cmpl *rxcmp;
+ uint32_t cons;
+
+ /* Check status of completion descriptor. */
+ raw_cons = cpr->cp_raw_cons +
+ offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2);
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
return RTE_ETH_RX_DESC_DONE;
+
+ /* Check whether rx desc has an mbuf attached. */
+ cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2);
+ if (cons >= rxq->rxrearm_start &&
+ cons < rxq->rxrearm_start + rxq->rxrearm_nb) {
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+
+ return RTE_ETH_RX_DESC_AVAIL;
}
- rx_buf = rxr->rx_buf_ring[cons];
- if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf)
- return RTE_ETH_RX_DESC_UNAVAIL;
+#endif
+ /*
+ * For the non-vector receive case, scan the completion ring to
+ * locate the completion descriptor for the requested offset.
+ */
+ raw_cons = cpr->cp_raw_cons;
+ desc = 0;
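+ /* desc counts packet completions seen so far; the scan stops at the requested offset. */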
+ while (1) {
+ uint32_t agg_cnt, cons, cmpl_type;
+
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ break;
+
+ cmpl_type = CMP_TYPE(rxcmp);
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_RX_L2:
+ case CMPL_BASE_TYPE_RX_L2_V2:
+ if (desc == offset) {
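+ /* The opaque field echoes the Rx ring index for this packet. */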
+ cons = rxcmp->opaque;
+ if (rxr->rx_buf_ring[cons])
+ return RTE_ETH_RX_DESC_DONE;
+ else
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+ agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp);
+ raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt;
+ desc++;
+ break;
+
+ case CMPL_BASE_TYPE_RX_TPA_END:
+ if (desc == offset)
+ return RTE_ETH_RX_DESC_DONE;
+
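+ /* Skip any aggregation buffer entries that follow the TPA end completion. */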
+ if (BNXT_CHIP_P5(rxq->bp)) {
+ struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end;
+
+ p5_tpa_end = (void *)rxcmp;
+ agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end);
+ } else {
+ struct rx_tpa_end_cmpl *tpa_end;
+
+ tpa_end = (void *)rxcmp;
+ agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end);
+ }
+
+ raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt;
+ desc++;
+ break;
+
+ default:
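+ /* Not a packet-level Rx completion; just step past it. */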
+ raw_cons += CMP_LEN(cmpl_type);
+ }
+ }
return RTE_ETH_RX_DESC_AVAIL;
}
struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
struct bnxt_tx_ring_info *txr;
struct bnxt_cp_ring_info *cpr;
- struct bnxt_sw_tx_bd *tx_buf;
+ struct rte_mbuf **tx_buf;
struct tx_pkt_cmpl *txcmp;
uint32_t cons, cp_cons;
int rc;
return RTE_ETH_TX_DESC_UNAVAIL;
}
tx_buf = &txr->tx_buf_ring[cons];
- if (tx_buf->mbuf == NULL)
+ if (*tx_buf == NULL)
return RTE_ETH_TX_DESC_DONE;
return RTE_ETH_TX_DESC_FULL;
}
int
-bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
struct bnxt *bp = dev->data->dev_private;
int ret = 0;
bp = vfr->parent_dev->data->dev_private;
/* parent is deleted while children are still valid */
if (!bp) {
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n",
- dev->data->port_id,
- filter_type,
- filter_op);
+ PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n",
+ dev->data->port_id);
return -EIO;
}
}
if (ret)
return ret;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
+ /* PMD supports thread-safe flow operations. rte_flow API
+ * functions can avoid taking a mutex for multi-thread safety.
+ */
+ dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
- /* PMD supports thread-safe flow operations. rte_flow API
- * functions can avoid mutex for multi-thread safety.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+ if (BNXT_TRUFLOW_EN(bp))
+ *ops = &bnxt_ulp_rte_flow_ops;
+ else
+ *ops = &bnxt_flow_ops;
- if (BNXT_TRUFLOW_EN(bp))
- *(const void **)arg = &bnxt_ulp_rte_flow_ops;
- else
- *(const void **)arg = &bnxt_flow_ops;
- break;
- default:
- PMD_DRV_LOG(ERR,
- "Filter type (%d) not supported", filter_type);
- ret = -EINVAL;
- break;
- }
return ret;
}
ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
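+ /* This read of the TX sequence register is intentional; the value is discarded. */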
+ rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]);
return 0;
}
uint16_t port_id;
uint32_t fifo;
- if (!ptp)
- return -ENODEV;
-
fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
- return 0;
+ return -ENOTSUP;
ns = rte_timespec_to_ns(ts);
/* Set the timecounters to a new value. */
ptp->tc.nsec = ns;
+ ptp->tx_tstamp_tc.nsec = ns;
+ ptp->rx_tstamp_tc.nsec = ns;
return 0;
}
int rc = 0;
if (!ptp)
- return 0;
+ return -ENOTSUP;
if (BNXT_CHIP_P5(bp))
rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
int rc;
if (!ptp)
- return 0;
+ return -ENOTSUP;
ptp->rx_filter = 1;
ptp->tx_tstamp_en = 1;
if (!BNXT_CHIP_P5(bp))
bnxt_map_ptp_regs(bp);
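+ /* P5 (Thor) chips do not use the mapped PTP registers; run the PTP alarm instead. */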
+ else
+ rc = bnxt_ptp_start(bp);
- return 0;
+ return rc;
}
static int
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
- return 0;
+ return -ENOTSUP;
ptp->rx_filter = 0;
ptp->tx_tstamp_en = 0;
if (!BNXT_CHIP_P5(bp))
bnxt_unmap_ptp_regs(bp);
+ else
+ bnxt_ptp_stop(bp);
return 0;
}
uint64_t ns;
if (!ptp)
- return 0;
+ return -ENOTSUP;
if (BNXT_CHIP_P5(bp))
rx_tstamp_cycles = ptp->rx_timestamp;
int rc = 0;
if (!ptp)
- return 0;
+ return -ENOTSUP;
if (BNXT_CHIP_P5(bp))
rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
- return 0;
+ return -ENOTSUP;
ptp->tc.nsec += delta;
+ ptp->tx_tstamp_tc.nsec += delta;
+ ptp->rx_tstamp_tc.nsec += delta;
return 0;
}
.rx_queue_stop = bnxt_rx_queue_stop,
.tx_queue_start = bnxt_tx_queue_start,
.tx_queue_stop = bnxt_tx_queue_stop,
- .filter_ctrl = bnxt_filter_ctrl_op,
+ .flow_ops_get = bnxt_flow_ops_get_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
.get_eeprom = bnxt_get_eeprom_op,
uint32_t val = info->reset_reg_val[index];
uint32_t reg = info->reset_reg[index];
uint32_t type, offset;
+ int ret;
type = BNXT_FW_STATUS_REG_TYPE(reg);
offset = BNXT_FW_STATUS_REG_OFF(reg);
switch (type) {
case BNXT_FW_STATUS_REG_TYPE_CFG:
- rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
+ ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x",
+ val, offset);
+ return;
+ }
break;
case BNXT_FW_STATUS_REG_TYPE_GRC:
offset = bnxt_map_reset_regs(bp, offset);
return 0;
}
-static void
+static int
bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
{
struct rte_kvargs *kvlist;
+ int ret;
if (devargs == NULL)
- return;
+ return 0;
kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
if (kvlist == NULL)
- return;
+ return -EINVAL;
/*
* Handler for "truflow" devarg.
* Invoked as for ex: "-a 0000:00:0d.0,host-based-truflow=1"
*/
- rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
- bnxt_parse_devarg_truflow, bp);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
+ bnxt_parse_devarg_truflow, bp);
+ if (ret)
+ goto err;
/*
* Handler for "flow_xstat" devarg.
* Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1"
*/
- rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
- bnxt_parse_devarg_flow_xstat, bp);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
+ bnxt_parse_devarg_flow_xstat, bp);
+ if (ret)
+ goto err;
/*
* Handler for "max_num_kflows" devarg.
* Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32"
*/
- rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
- bnxt_parse_devarg_max_num_kflows, bp);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
+ bnxt_parse_devarg_max_num_kflows, bp);
+ if (ret)
+ goto err;
+err:
rte_kvargs_free(kvlist);
+ return ret;
}
static int bnxt_alloc_switch_domain(struct bnxt *bp)
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
PMD_DRV_LOG(ERR,
- "Failed to allocate hwrm resource rc: %x\n", rc);
+ "Failed to allocate response buffer rc: %x\n", rc);
return rc;
}
rc = bnxt_alloc_leds_info(bp);
bp = eth_dev->data->dev_private;
/* Parse dev arguments passed on when starting the DPDK application. */
- bnxt_parse_dev_args(bp, pci_dev->device.devargs);
+ rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs);
+ if (rc)
+ goto error_free;
rc = bnxt_drv_init(eth_dev);
if (rc)
int i, ret = 0;
struct rte_kvargs *kvlist = NULL;
+ if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
+ return 0;
+ if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
+ PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
+ eth_da->type);
+ return -ENOTSUP;
+ }
num_rep = eth_da->nb_representor_ports;
if (num_rep > BNXT_MAX_VF_REPS) {
PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",