i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct i40e_fdir_filter_conf *filter,
- bool add);
+ bool add, bool wait_status);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
char z_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz = NULL;
struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+ uint16_t i;
if ((pf->flags & I40E_FLAG_FDIR) == 0) {
PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
return I40E_SUCCESS;
}
+
/* make new FDIR VSI */
vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
if (!vsi) {
goto fail_mem;
}
+ /* enable FDIR MSIX interrupt */
+ vsi->nb_used_qps = 1;
+ i40e_vsi_queues_bind_intr(vsi, I40E_ITR_INDEX_NONE);
+ i40e_vsi_enable_queues_intr(vsi);
+
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
eth_dev->device->driver->name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
- mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
+ mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN *
+ I40E_FDIR_PRG_PKT_CNT, SOCKET_ID_ANY);
if (!mz) {
PMD_DRV_LOG(ERR, "Cannot init memzone for "
"flow director program packet.");
err = I40E_ERR_NO_MEMORY;
goto fail_mem;
}
- pf->fdir.prg_pkt = mz->addr;
- pf->fdir.dma_addr = mz->iova;
+
+ for (i = 0; i < I40E_FDIR_PRG_PKT_CNT; i++) {
+ pf->fdir.prg_pkt[i] = (uint8_t *)mz->addr +
+ I40E_FDIR_PKT_LEN * i;
+ pf->fdir.dma_addr[i] = mz->iova +
+ I40E_FDIR_PKT_LEN * i;
+ }
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
pf->fdir.fdir_actual_cnt = 0;
vsi = pf->fdir.fdir_vsi;
if (!vsi)
return;
+
+ /* disable FDIR MSIX interrupt */
+ i40e_vsi_queues_unbind_intr(vsi);
+ i40e_vsi_disable_queues_intr(vsi);
+
int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
if (err)
PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
if (err)
PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
+
i40e_dev_rx_queue_release(pf->fdir.rxq);
rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
pf->fdir.rxq = NULL;
return ret;
}
+/*
+ * Drain any stale programming-status reports left on the FDIR programming
+ * rx ring, so that a status read after the next programming request cannot
+ * pick up an old error report.  Bounded by the rx ring size to guarantee
+ * termination even if the ring keeps reporting errors.
+ */
+static inline void
+i40e_fdir_programming_status_cleanup(struct i40e_rx_queue *rxq)
+{
+	uint16_t retry_count = 0;
+
+	/* capture the previous error report (if any) from the rx ring */
+	while ((i40e_check_fdir_programming_status(rxq) < 0) &&
+	       (++retry_count < I40E_FDIR_NUM_RX_DESC))
+		PMD_DRV_LOG(INFO, "error report captured.");
+}
+
static int
i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
struct i40e_fdir_filter *filter)
i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
{
struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *hash_filter;
int ret;
if (filter->fdir.input.flow_ext.pkt_template)
ret);
return ret;
}
- fdir_info->hash_map[ret] = filter;
- TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+ if (fdir_info->hash_map[ret])
+ return -1;
+
+ hash_filter = &fdir_info->fdir_filter_array[ret];
+ rte_memcpy(hash_filter, filter, sizeof(*filter));
+ fdir_info->hash_map[ret] = hash_filter;
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, hash_filter, rules);
return 0;
}
fdir_info->hash_map[ret] = NULL;
TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
- rte_free(filter);
return 0;
}
+/*
+ * Allocate one rte_flow entry from the pre-allocated FDIR flow pool.
+ *
+ * Free slots are tracked in fdir_flow_pool.bitmap (a set bit means the
+ * slot is free).  The returned entry is zeroed before being handed out.
+ *
+ * Returns a pointer to the flow entry, or NULL when the FDIR space is
+ * exhausted or the bitmap has no free slot.
+ */
+struct rte_flow *
+i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info)
+{
+	struct rte_flow *flow = NULL;
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+	uint32_t i = 0;
+	int ret;
+
+	if (fdir_info->fdir_actual_cnt >=
+			fdir_info->fdir_space_size) {
+		PMD_DRV_LOG(ERR, "Fdir space full");
+		return NULL;
+	}
+
+	/* scan for the first slab containing a free (set) bit */
+	ret = rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos,
+			&slab);
+
+	/* normally this won't happen as the fdir_actual_cnt should be
+	 * the same as the number of set bits in fdir_flow_pool, but
+	 * handle this error condition here anyway for safety
+	 */
+	if (ret == 0) {
+		PMD_DRV_LOG(ERR, "fdir_actual_cnt out of sync");
+		return NULL;
+	}
+
+	/* locate the free bit within the slab and mark the slot used */
+	i = rte_bsf64(slab);
+	pos += i;
+	rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
+	flow = &fdir_info->fdir_flow_pool.pool[pos].flow;
+
+	memset(flow, 0, sizeof(struct rte_flow));
+
+	return flow;
+}
+
+/*
+ * Return a flow entry to the FDIR flow pool: recover the containing
+ * pool element from the embedded rte_flow pointer and set its bit in
+ * the free-slot bitmap again.
+ */
+void
+i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
+			 struct rte_flow *flow)
+{
+	struct i40e_fdir_entry *f;
+
+	f = FLOW_TO_FLOW_BITMAP(flow);
+	rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt[0];
enum i40e_filter_pctype pctype;
int ret = 0;
return ret;
}
+/*
+ * Find a free FDIR programming packet buffer for the current tx tail.
+ *
+ * Each programming request consumes two tx descriptors; the second one
+ * carries the DD (descriptor done) bit, so buffer slot i maps to
+ * descriptor pair (2i, 2i + 1) and to prg_pkt[i].  A cached count of
+ * known-available buffers is kept in fdir_info->txq_available_buf_count;
+ * when it runs out, the ring is re-scanned from the current tail until
+ * the first not-yet-completed descriptor.
+ *
+ * Returns the packet buffer for txq->tx_tail, or NULL if no buffer is
+ * available.
+ */
+static inline unsigned char *
+i40e_find_available_buffer(struct rte_eth_dev *dev)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+
+	/* no available buffer
+	 * search for more available buffers from the current
+	 * descriptor, until an unavailable one
+	 */
+	if (fdir_info->txq_available_buf_count <= 0) {
+		uint16_t tmp_tail;
+		volatile struct i40e_tx_desc *tmp_txdp;
+
+		tmp_tail = txq->tx_tail;
+
+		do {
+			/* re-read the descriptor of the slot currently
+			 * being probed; the original code read the same
+			 * descriptor on every iteration while tmp_tail
+			 * advanced, mis-counting available buffers.
+			 */
+			tmp_txdp = &txq->tx_ring[tmp_tail + 1];
+			if ((tmp_txdp->cmd_type_offset_bsz &
+					rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+					rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+				fdir_info->txq_available_buf_count++;
+			else
+				break;
+
+			tmp_tail += 2;
+			if (tmp_tail >= txq->nb_tx_desc)
+				tmp_tail = 0;
+		} while (tmp_tail != txq->tx_tail);
+	}
+
+	if (fdir_info->txq_available_buf_count > 0)
+		fdir_info->txq_available_buf_count--;
+	else
+		return NULL;
+	/* tx_tail >> 1: two descriptors per programming buffer */
+	return (unsigned char *)fdir_info->prg_pkt[txq->tx_tail >> 1];
+}
+
/**
* i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ unsigned char *pkt = NULL;
enum i40e_filter_pctype pctype;
struct i40e_fdir_info *fdir_info = &pf->fdir;
- struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter *node;
struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+ bool wait_status = true;
int ret = 0;
if (pf->fdir.fdir_vsi == NULL) {
/* Check if there is the filter in SW list */
memset(&check_filter, 0, sizeof(check_filter));
i40e_fdir_filter_convert(filter, &check_filter);
- node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
- if (add && node) {
- PMD_DRV_LOG(ERR,
- "Conflict with existing flow director rules!");
- return -EINVAL;
- }
- if (!add && !node) {
- PMD_DRV_LOG(ERR,
- "There's no corresponding flow firector filter!");
- return -EINVAL;
+ if (add) {
+ ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (fdir_info->fdir_invalprio == 1 &&
+ fdir_info->fdir_guarantee_free_space > 0)
+ wait_status = false;
+ } else {
+ node = i40e_sw_fdir_filter_lookup(fdir_info,
+ &check_filter.fdir.input);
+ if (!node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Error deleting fdir rule from hash table!");
+ return -EINVAL;
+ }
+
+ if (fdir_info->fdir_invalprio == 1)
+ wait_status = false;
}
- memset(pkt, 0, I40E_FDIR_PKT_LEN);
+ /* find a buffer to store the pkt */
+ pkt = i40e_find_available_buffer(dev);
+ if (pkt == NULL)
+ goto error_op;
+ memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
if (ret < 0) {
PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
- return ret;
+ goto error_op;
}
if (hw->mac.type == I40E_MAC_X722) {
hw, I40E_GLQF_FD_PCTYPES((int)pctype));
}
- ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
+ ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add,
+ wait_status);
if (ret < 0) {
PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
pctype);
- return ret;
+ goto error_op;
}
if (add) {
if (fdir_info->fdir_invalprio == 1 &&
fdir_info->fdir_guarantee_free_space > 0)
fdir_info->fdir_guarantee_free_space--;
-
- fdir_filter = rte_zmalloc("fdir_filter",
- sizeof(*fdir_filter), 0);
- if (fdir_filter == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -ENOMEM;
- }
-
- rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
- ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
- if (ret < 0)
- rte_free(fdir_filter);
} else {
fdir_info->fdir_actual_cnt--;
if (fdir_info->fdir_invalprio == 1 &&
fdir_info->fdir_guarantee_free_space <
fdir_info->fdir_guarantee_total_space)
fdir_info->fdir_guarantee_free_space++;
-
- ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
}
return ret;
+
+error_op:
+ /* roll back */
+ if (add)
+ i40e_sw_fdir_filter_del(pf, &check_filter.fdir.input);
+ else
+ i40e_sw_fdir_filter_insert(pf, &check_filter);
+
+ return ret;
}
/*
PMD_DRV_LOG(INFO, "filling transmit descriptor.");
txdp = &(txq->tx_ring[txq->tx_tail + 1]);
- txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+ txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[0]);
td_cmd = I40E_TX_DESC_CMD_EOP |
I40E_TX_DESC_CMD_RS |
I40E_TX_DESC_CMD_DUMMY;
i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct i40e_fdir_filter_conf *filter,
- bool add)
+ bool add, bool wait_status)
{
struct i40e_tx_queue *txq = pf->fdir.txq;
struct i40e_rx_queue *rxq = pf->fdir.rxq;
volatile struct i40e_tx_desc *txdp;
volatile struct i40e_filter_program_desc *fdirdp;
uint32_t td_cmd;
- uint16_t vsi_id, i;
+ uint16_t vsi_id;
uint8_t dest;
+ uint32_t i;
PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
fdirdp = (volatile struct i40e_filter_program_desc *)
PMD_DRV_LOG(INFO, "filling transmit descriptor.");
txdp = &txq->tx_ring[txq->tx_tail + 1];
- txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+ txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);
+
td_cmd = I40E_TX_DESC_CMD_EOP |
I40E_TX_DESC_CMD_RS |
I40E_TX_DESC_CMD_DUMMY;
txq->tx_tail = 0;
/* Update the tx tail register */
rte_wmb();
+
+ /* fdir program rx queue cleanup */
+ i40e_fdir_programming_status_cleanup(rxq);
+
I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
- for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
- if ((txdp->cmd_type_offset_bsz &
- rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
- rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
- break;
- rte_delay_us(1);
- }
- if (i >= I40E_FDIR_MAX_WAIT_US) {
- PMD_DRV_LOG(ERR,
- "Failed to program FDIR filter: time out to get DD on tx queue.");
- return -ETIMEDOUT;
- }
- /* totally delay 10 ms to check programming status*/
- rte_delay_us(I40E_FDIR_MAX_WAIT_US);
- if (i40e_check_fdir_programming_status(rxq) < 0) {
- PMD_DRV_LOG(ERR,
- "Failed to program FDIR filter: programming status reported.");
- return -ETIMEDOUT;
+
+ if (wait_status) {
+ for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if ((txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ break;
+ rte_delay_us(1);
+ }
+ if (i >= I40E_FDIR_MAX_WAIT_US) {
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: time out to get DD on tx queue.");
+ return -ETIMEDOUT;
+ }
+ /* totally delay 10 ms to check programming status*/
+ rte_delay_us(I40E_FDIR_MAX_WAIT_US);
+ if (i40e_check_fdir_programming_status(rxq) < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: programming status reported.");
+ return -ETIMEDOUT;
+ }
}
return 0;