#define IAVF_NUM_MACADDR_MAX 64
+/* Period for the iAVF device watchdog.  A value of 0 presumably means the
+ * periodic watchdog alarm is disabled by default — TODO confirm against the
+ * code that schedules the watchdog (not visible in this header).
+ */
+#define IAVF_DEV_WATCHDOG_PERIOD 0
+
#define IAVF_DEFAULT_RX_PTHRESH 8
#define IAVF_DEFAULT_RX_HTHRESH 8
#define IAVF_DEFAULT_RX_WTHRESH 0
struct iavf_rx_queue;
struct iavf_tx_queue;
+
+/* Statistics for inbound IPsec Crypto offload traffic.
+ * NOTE(review): field semantics inferred from names only — confirm against
+ * the code that populates this structure.
+ */
+struct iavf_ipsec_crypto_stats {
+ uint64_t icount; /* presumably: inbound ipsec packet count */
+ uint64_t ibytes; /* presumably: inbound ipsec byte count */
+ struct {
+ uint64_t count; /* total inbound error count */
+ uint64_t sad_miss; /* security association lookup misses */
+ uint64_t not_processed; /* packets not processed by ipsec */
+ uint64_t icv_check; /* integrity check value failures */
+ uint64_t ipsec_length; /* length errors */
+ uint64_t misc; /* other/uncategorized errors */
+ } ierrors;
+};
+
+/* Extended stats container: the base virtchnl ethernet stats plus the
+ * IPsec Crypto offload counters, kept together so they can be snapshotted
+ * and reported as one unit (see iavf_vsi::eth_stats_offset).
+ */
+struct iavf_eth_xstats {
+ struct virtchnl_eth_stats eth_stats;
+ struct iavf_ipsec_crypto_stats ips_stats;
+};
+
/* Structure that defines a VSI, associated with a adapter. */
struct iavf_vsi {
struct iavf_adapter *adapter; /* Backreference to associated adapter */
uint16_t max_macaddrs; /* Maximum number of MAC addresses */
uint16_t base_vector;
uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
- struct virtchnl_eth_stats eth_stats_offset;
+ /* Widened from virtchnl_eth_stats so the baseline snapshot also covers
+ * the IPsec Crypto counters.  The "_offset" naming suggests this holds
+ * the stats baseline subtracted when reporting deltas — confirm at the
+ * stats-get/reset implementation.
+ */
+ struct iavf_eth_xstats eth_stats_offset;
};
struct rte_flow;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+ /* Number of responses still expected for pend_cmd: set to 1 by
+ * _atomic_set_cmd() and to 2 by _atomic_set_async_response_cmd()
+ * (see those helpers below).
+ */
+ uint32_t pend_cmd_count;
int cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+ /** iAVF watchdog enable */
+ bool watchdog_enabled;
+
/* Event from pf */
bool dev_closed;
bool link_up;
rte_spinlock_t flow_ops_lock;
struct iavf_parser_list rss_parser_list;
struct iavf_parser_list dist_parser_list;
+ struct iavf_parser_list ipsec_crypto_parser_list; /* ipsec crypto flow parsers */
struct iavf_fdir_info fdir; /* flow director info */
/* indicate large VF support enabled or not */
IAVF_PROTO_XTR_IPV6_FLOW,
IAVF_PROTO_XTR_TCP,
IAVF_PROTO_XTR_IP_OFFSET,
+ /* Presumably extracts the IPsec Crypto security association id (SAID)
+ * from the Rx descriptor — TODO confirm against the Rx path.
+ */
+ IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
IAVF_PROTO_XTR_MAX,
};
uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
};
+/* Opaque per-device security context; defined in the IPsec Crypto code. */
+struct iavf_security_ctx;
+
/* Structure to store private data for each VF instance. */
struct iavf_adapter {
struct iavf_hw hw;
struct rte_eth_dev_data *dev_data;
struct iavf_info vf;
+ struct iavf_security_ctx *security_ctx; /* ipsec crypto offload context */
bool rx_bulk_alloc_allowed;
/* For vector PMD */
bool rx_vec_allowed;
bool tx_vec_allowed;
- const uint32_t *ptype_tbl;
+ /* Changed from a shared const pointer to a per-device, cache-aligned
+ * writable copy of the ptype table — presumably so entries can be
+ * modified per device (e.g. for ipsec SAID extraction); confirm where
+ * it is initialized.
+ */
+ uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
bool stopped;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
(&((struct iavf_adapter *)adapter)->vf)
#define IAVF_DEV_PRIVATE_TO_HW(adapter) \
(&((struct iavf_adapter *)adapter)->hw)
+/* Accessor for the per-device IPsec Crypto security context; note it
+ * yields the pointer value itself, not its address (unlike the _VF/_HW
+ * accessors above, which return addresses of embedded members).
+ */
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+ (((struct iavf_adapter *)adapter)->security_ctx)
/* IAVF_VSI_TO */
#define IAVF_VSI_TO_HW(vsi) \
+/* Atomically claim vf->pend_cmd for @ops if no command is currently
+ * pending (pend_cmd == VIRTCHNL_OP_UNKNOWN).  Returns 0 on success and
+ * non-zero (with an error log) if another command is still outstanding.
+ * The hunk replaces rte_atomic32_cmpset with the GCC __atomic builtin so
+ * the compare-exchange operates on the enum object directly rather than
+ * through a casted uint32_t pointer.
+ */
static inline int
_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
- int ret = rte_atomic32_cmpset((volatile uint32_t *)&vf->pend_cmd,
- VIRTCHNL_OP_UNKNOWN, ops);
+ enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
+ /* __atomic_compare_exchange returns true when the swap happened. */
+ int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+ /* One response is expected for this command.
+ * NOTE(review): this store executes even when the CAS above failed,
+ * overwriting the in-flight command's count — confirm this is intended.
+ */
+ __atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+
return !ret;
}
+/* Check there is pending cmd in execution. If none, set new command. */
+/* Same claim semantics as _atomic_set_cmd() (0 on success, non-zero if a
+ * command is already pending), but pend_cmd_count is set to 2 — presumably
+ * the immediate virtchnl reply plus a later asynchronous response, as the
+ * function name suggests; confirm against the completion path.
+ */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+ enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
+ int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+ /* NOTE(review): as in _atomic_set_cmd(), the count is stored even when
+ * the CAS failed — confirm this is intended.
+ */
+ __atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+
+ return !ret;
+}
int iavf_check_api_version(struct iavf_adapter *adapter);
int iavf_get_vf_resource(struct iavf_adapter *adapter);
void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
uint16_t size);
void iavf_tm_conf_init(struct rte_eth_dev *dev);
void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+/* Send an IPsec Crypto request of msg_len bytes to the PF over virtchnl
+ * and receive up to resp_msg_len bytes of response into resp_msg.
+ * NOTE(review): return semantics defined at the implementation —
+ * presumably 0 on success, negative on failure.
+ */
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+ uint8_t *msg, size_t msg_len,
+ uint8_t *resp_msg, size_t resp_msg_len);
extern const struct rte_tm_ops iavf_tm_ops;
#endif /* _IAVF_ETHDEV_H_ */